diff --git "a/438.jsonl" "b/438.jsonl" new file mode 100644--- /dev/null +++ "b/438.jsonl" @@ -0,0 +1,532 @@ +{"seq_id":"467640864","text":"#author:Svab\n#title:Died Story\n#version:0.1\n#date:2018.12.24.\n\n\n#csomagok\n\nimport random\n\n\n# Alap Statok\nelet = int(0) #amennyi napot elt\n\negeszseg = int(100)\nboldogsag = int(100)\nnepszeruseg = int(100) # social life\npenz = int(100) # random kezdes: nyomor, atlagos, luxus\n\n# az esemeny ebbe a listaba fog kerulni, hogy ki lehessen irni\n\neletsztori = []\n\n\n\n# par esemeny tesztelni\n\ndef lottonyeres():\n global boldogsag\n boldogsag = boldogsag + 20\n global penz\n penz = penz + 100\n eletsztori.append('-Megnyerte a lottot.')\n\ndef tuloratfizetnek():\n global penz\n penz = penz + 5\n global boldogsag\n boldogsag = boldogsag + 10\n eletsztori.append('-Kifizettek a tulorajat.')\n\ndef szocialissegely():\n global penz\n global boldogsag\n global nepszeruseg\n penz = penz + 4\n boldogsag = boldogsag + 2\n nepszeruseg = nepszeruseg - 5\n eletsztori.append('-Szocialis segelyt kapott.')\n\ndef penzttalalt():\n global penz\n global boldogsag\n penz = penz + 3\n boldogsag = boldogsag + 5\n eletsztori.append('-Penzt talalt.')\n\ndef penztorokolt():\n global penz\n global boldogsag\n global nepszeruseg\n penz = penz + 15\n boldogsag = boldogsag + 5\n nepszeruseg = nepszeruseg + 8\n eletsztori.append('-Penzt orokolt.')\n\ndef rokonoklatogatnak():\n global boldogsag\n boldogsag = boldogsag + 3\n eletsztori.append('-Meglatogattak a rokonai.')\n\ndef rokonodmeghal():\n global boldogsag\n global egeszseg\n boldogsag = boldogsag - 15\n egeszseg = egeszseg -2\n eletsztori.append('-Meghalt egy rokona.')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#Ezt a listat kell meghivni, ebbe vannak a funkciok.\n\nesemenyek = [lottonyeres, tuloratfizetnek, szocialissegely, penzttalalt, penztorokolt, rokonoklatogatnak, rokonodmeghal]\n\n\n\n\n#jatek\nwhile boldogsag > 0 or egeszseg > 0 or nepszeruseg > 0 or penz > 0:\n random.choice(esemenyek)()\n elet = elet+1\n\n if boldogsag < 1 or egeszseg < 1 or nepszeruseg < 1 or penz < 1:\n break\nprint('\\n'.join(eletsztori))\nprint(str(elet), \"napot elt.\")\nprint('penz: '+str(penz), '\\nboldogsag: '+str(boldogsag), '\\negeszseg: '+str(egeszseg), '\\nnepszeruseg: ', str(nepszeruseg))\n","sub_path":"diedstory_0.1.py","file_name":"diedstory_0.1.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"41184249","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\n\r\nfrom django.db import models, migrations\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='Critic',\r\n fields=[\r\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\r\n ('name', models.CharField(max_length=80)),\r\n ],\r\n ),\r\n migrations.CreateModel(\r\n name='Review',\r\n fields=[\r\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\r\n ('MovieID', models.CharField(max_length=80)),\r\n ('Movietitle', models.CharField(max_length=80)),\r\n ('review', models.TextField()),\r\n ('Rating', models.IntegerField()),\r\n ('release_date', models.DateField()),\r\n ('critic', models.ForeignKey(to='myapp.Critic')),\r\n ],\r\n ),\r\n migrations.CreateModel(\r\n name='User',\r\n fields=[\r\n ('id', models.AutoField(verbose_name='ID', primary_key=True, 
serialize=False, auto_created=True)),\r\n ('name', models.CharField(max_length=80)),\r\n ('critic', models.ForeignKey(to='myapp.Critic')),\r\n ],\r\n ),\r\n ]\r\n","sub_path":"myapp/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"522838073","text":"from django.http import HttpResponse\nfrom django.http import Http404\nfrom django.shortcuts import render\nfrom django.shortcuts import get_object_or_404, render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse\nfrom django.http import QueryDict\n\n\nfrom .models import Product\n\n\n# GET PRODUCT -- GET METHOD\n@csrf_exempt\ndef getproducts(request):\n if request.method == 'GET':\n product_list = list(Product.objects.values())\n return JsonResponse(dict(products=product_list), safe=False)\n\n\n# ADD PRODUCT -- POST METHOD\n@csrf_exempt\ndef addproduct(request):\n if request.method == 'POST':\n if request.POST.get('name') and request.POST.get('description') and request.POST.get('price'):\n product=Product()\n product.name= request.POST.get('name')\n product.description= request.POST.get('description')\n product.price = request.POST.get('price')\n product.save()\n\n data = dict()\n data['name'] = product.name\n data['description'] = product.description\n data['price'] = product.price\n data['id'] = product.id\n return JsonResponse(data, safe=False)\n # RETURN THE ONE PRODUCT TO BE ADDED ON FRONT END TABLE\n\n\n# UPDATE PRODUCT -- UPDATE REQUEST BODY\n@csrf_exempt\ndef updateproduct(request):\n\n data = QueryDict(request.body)\n existing = data['id']\n name = data['name']\n description = data['description']\n price = data['price']\n\n if request.method == 'PUT':\n\n product=Product()\n product = Product.objects.get(id=existing)\n\n product.name = name\n product.description = description\n product.price = price\n product.save()\n\n print(product.id)\n\n data = dict()\n data['name'] = name\n data['description'] = description\n data['price'] = price\n data['id'] = product.id\n\n\n return JsonResponse(data, safe=False)\n\n\n# DELETE PRODUCT -- DELETE METHOD\n@csrf_exempt\ndef deleteproduct(request):\n if request.method == 'DELETE':\n deletedata = QueryDict(request.body)\n id = deletedata['id']\n\n product = Product.objects.get(id=id)\n product.delete()\n\n data = dict()\n data['id'] = id\n return JsonResponse(data, safe=False)\n","sub_path":"lab5/product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"576534526","text":"from __future__ import absolute_import\n\nimport os\nimport json\nimport logging\n\nfrom eth_utils import (\n add_0x_prefix,\n is_string,\n to_dict,\n to_tuple,\n)\n\nfrom .filesystem import (\n recursive_find_files,\n ensure_file_exists,\n)\n\n\nDEFAULT_CONTRACTS_DIR = \"./contracts/\"\n\n\ndef get_contracts_source_dir(project_dir):\n contracts_source_dir = os.path.join(project_dir, DEFAULT_CONTRACTS_DIR)\n return os.path.abspath(contracts_source_dir)\n\n\nBUILD_ASSET_DIR = \"./build\"\n\n\ndef get_build_asset_dir(project_dir):\n build_asset_dir = os.path.join(project_dir, BUILD_ASSET_DIR)\n return build_asset_dir\n\n\nCOMPILED_CONTRACTS_ASSET_FILENAME = './contracts.json'\n\n\ndef get_compiled_contracts_asset_path(build_asset_dir):\n compiled_contracts_asset_path = os.path.join(\n build_asset_dir,\n 
COMPILED_CONTRACTS_ASSET_FILENAME,\n )\n return compiled_contracts_asset_path\n\n\n@to_tuple\ndef find_solidity_source_files(base_dir):\n return (\n os.path.relpath(source_file_path)\n for source_file_path\n in recursive_find_files(base_dir, \"*.sol\")\n )\n\n\ndef get_project_source_paths(contracts_source_dir):\n project_source_paths = find_solidity_source_files(contracts_source_dir)\n return project_source_paths\n\n\ndef get_test_source_paths(tests_dir):\n test_source_paths = find_solidity_source_files(tests_dir)\n return test_source_paths\n\n\ndef process_compiler_output(name_from_compiler, data_from_compiler):\n # TODO: use the source path.\n _, _, contract_name = name_from_compiler.rpartition(':')\n contract_data = normalize_contract_data(data_from_compiler)\n return contract_name, contract_data\n\n\ndef _load_json_if_string(value):\n if is_string(value):\n return json.loads(value)\n else:\n return value\n\n\n@to_dict\ndef normalize_contract_data(contract_data):\n if 'metadata' in contract_data:\n yield 'metadata', normalize_contract_metadata(contract_data['metadata'])\n if 'bin' in contract_data:\n yield 'bytecode', add_0x_prefix(contract_data['bin'])\n if 'bin-runtime' in contract_data:\n yield 'bytecode_runtime', add_0x_prefix(contract_data['bin-runtime'])\n if 'abi' in contract_data:\n yield 'abi', _load_json_if_string(contract_data['abi'])\n if 'userdoc' in contract_data:\n yield 'userdoc', _load_json_if_string(contract_data['userdoc'])\n if 'devdoc' in contract_data:\n yield 'devdoc', _load_json_if_string(contract_data['devdoc'])\n\n\ndef normalize_contract_metadata(metadata):\n if not metadata:\n return None\n elif is_string(metadata):\n return json.loads(metadata)\n else:\n raise ValueError(\"Unknown metadata format '{0}'\".format(metadata))\n\n\ndef write_compiled_sources(compiled_contracts_asset_path, compiled_sources):\n logger = logging.getLogger('populus.compilation.write_compiled_sources')\n ensure_file_exists(compiled_contracts_asset_path)\n\n with open(compiled_contracts_asset_path, 'w') as outfile:\n outfile.write(\n json.dumps(compiled_sources,\n sort_keys=True,\n indent=4,\n separators=(',', ': '))\n )\n\n logger.info(\n \"> Wrote compiled assets to: %s\",\n os.path.relpath(compiled_contracts_asset_path)\n )\n\n return compiled_contracts_asset_path\n","sub_path":"populus/utils/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"330819847","text":"\"\"\"\nTest cases for reading and updating PDBx data files using Python Wrapper\nIoAdapterCore wrapper which provides an API to the C++ CifFile class\nlibrary of file and dictionary tools that is conforms to our Native\nPython library.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport pytest\n\ntry:\n from pathlib import Path\nexcept ImportError:\n from pathlib2 import Path\n\nfrom mmcif.io.IoAdapterPy import IoAdapterPy as IoAdapter\nfrom mmcif.io.PdbxReader import PdbxError, SyntaxError\n\n__docformat__ = \"restructuredtext en\"\n__author__ = \"Igor Petrik\"\n__email__ = \"petrikigor@gmail.com\"\n__license__ = \"Apache 2.0\"\n\nclass TestIoAdapter():\n __slots__ = ()\n \n @pytest.fixture()\n def io_data(self, test_files, in_tmpdir):\n inputs = {}\n\n inputs['pathPdbxDataFile'] = test_files / \"1kip.cif\"\n inputs['pathBigPdbxDataFile'] = test_files / \"1ffk.cif.gz\"\n inputs['pathPdbxDictFile'] = test_files / \"mmcif_pdbx_v5_next.dic\"\n inputs['testBlockCount'] = 
7350\n inputs['pathErrPdbxDataFile'] = test_files / \"1bna-errors.cif\"\n inputs['pathQuotesPdbxDataFile'] = test_files / \"specialTestFile.cif\"\n #\n inputs['pathOutputPdbxFile'] = Path(\"myPdbxOutputFile.cif\")\n inputs['pathOutputPdbxFileSelect'] = Path(\"myPdbxOutputFileSelect.cif\")\n inputs['pathOutputPdbxFileExclude'] = Path(\"myPdbxOutputFileExclude.cif\")\n #\n inputs['pathQuotesOutputPdbxFile'] = Path(\"myPdbxQuotesOutputFile.cif\")\n inputs['pathBigOutputDictFile'] = Path(\"myDictOutputFile.cif\")\n\n inputs['pathMissingFile'] = test_files / \"unicode-test-missing.cif\"\n #\n inputs['pathUnicodePdbxFile'] = test_files / \"unicode-test.cif\"\n inputs['pathCharRefPdbxFile'] = test_files / \"unicode-char-ref-test.cif\"\n #\n inputs['pathOutputUnicodePdbxFile'] = Path(\"out-unicode-test.cif\")\n inputs['pathOutputCharRefPdbxFile'] = Path(\"out-unicode-char-ref-test.cif\")\n\n inputs['pathOutputDir'] = Path()\n \n return inputs\n\n @pytest.mark.parametrize('fp_key, enforceAscii',\n [('pathPdbxDataFile', False), \n ('pathBigPdbxDataFile', False), \n ('pathQuotesPdbxDataFile', False), \n ('pathUnicodePdbxFile', False), \n ('pathPdbxDataFile', True), \n ('pathBigPdbxDataFile', True), \n ('pathQuotesPdbxDataFile', True)])\n def testFileReader(self, io_data, fp_key, enforceAscii):\n io = IoAdapter(raiseExceptions=True)\n containerList = io.readFile(str(io_data[fp_key]), \n enforceAscii=enforceAscii, \n outDirPath=str(io_data['pathOutputDir']))\n print (\"Read %d data blocks\" % len(containerList))\n assert len(containerList) == 1\n\n @pytest.mark.parametrize('fp_key', \n ['pathPdbxDictFile', 'pathPdbxDictFile'])\n def test_dict_reader(self, io_data, fp_key):\n io = IoAdapter(raiseExceptions=True)\n containerList = io.readFile(str(io_data[fp_key]), enforceAscii=False, \n outDirPath=str(io_data['pathOutputDir']))\n print(\"Read %d data blocks\" % len(containerList))\n assert len(containerList) > io_data['testBlockCount']\n\n @pytest.mark.parametrize('fp_key', \n ['pathErrPdbxDataFile', 'pathErrPdbxDataFile'])\n def test_file_reader_exception_handler_1(self, io_data, fp_key):\n io = IoAdapter(raiseExceptions=True)\n print (io_data)\n with pytest.raises(SyntaxError):\n io.readFile(str(io_data[fp_key]), enforceAscii=False, \n outDirPath=str(io_data['pathOutputDir']))\n\n @pytest.mark.parametrize('fp_key, expected_exc, enforceAscii', \n [('pathMissingFile', PdbxError, True),])\n def test_file_reader_exception_handler_2(self, io_data, fp_key, expected_exc, \n enforceAscii):\n with pytest.raises(expected_exc):\n io = IoAdapter(raiseExceptions=True, readEncodingErrors='ignore')\n containerList = io.readFile(str(io_data[fp_key]), \n enforceAscii=enforceAscii, \n outDirPath=str(io_data['pathOutputDir']))\n print (\"Containerlist length %d \" % len(containerList))\n\n @pytest.mark.parametrize('ifp_key, ofp_key, enforceAscii', \n [('pathBigPdbxDataFile', 'pathOutputPdbxFile', True),\n ('pathPdbxDictFile', 'pathBigOutputDictFile', True), \n ('pathQuotesPdbxDataFile', 'pathQuotesOutputPdbxFile', True), \n ('pathUnicodePdbxFile', 'pathOutputUnicodePdbxFile', False), \n ('pathCharRefPdbxFile', 'pathOutputCharRefPdbxFile', False)])\n def test_file_reader_writer(self, io_data, ifp_key, ofp_key, enforceAscii):\n io = IoAdapter(raiseExceptions=True, useCharRefs=enforceAscii)\n containerList = io.readFile(str(io_data[ifp_key]))\n print (\"Read %d data blocks\" % len(containerList))\n ok = io.writeFile(str(io_data[ofp_key]), containerList=containerList, \n enforceAscii=enforceAscii)\n assert ok\n\n 
@pytest.mark.parametrize('ifp_key, ofp_key, selectList, excludeFlag',\n [('pathBigPdbxDataFile', 'pathOutputPdbxFileSelect', ['atom_site'], False), \n ('pathBigPdbxDataFile', 'pathOutputPdbxFileExclude', ['atom_site'], True)])\n def test_file_reader_writer_select(self, io_data, ifp_key, ofp_key, selectList, excludeFlag):\n io = IoAdapter(raiseExceptions=False, useCharRefs=True)\n containerList = io.readFile(str(io_data[ifp_key]), enforceAscii=True, \n selectList=selectList, \n excludeFlag=excludeFlag, \n outDirPath=str(io_data['pathOutputDir']))\n print (\"Read %d data blocks\" % len(containerList))\n ok = io.writeFile(str(io_data[ofp_key]), containerList=containerList, \n enforceAscii=True)\n assert ok\n\n","sub_path":"tests/io_adapter_py_test.py","file_name":"io_adapter_py_test.py","file_ext":"py","file_size_in_byte":6392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"199137074","text":"from django.conf.urls import include, patterns, url\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.contrib import admin\nfrom django.conf import settings\n\nadmin.autodiscover()\n\nif settings.USE_I18N:\n ptrn = i18n_patterns\nelse:\n ptrn = patterns\n\nurlpatterns = ptrn('',\n url(r'^', include('apps.{{project_name}}.urls')),\n url(r'^admin/', include(admin.site.urls)),\n # urls here\n)\n\nif settings.DEBUG:\n urlpatterns = patterns('',\n url(r'^browse-api/', include('rest_framework.urls', namespace='rest_framework')),\n url(r'^media/(?P.*)$', 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),\n url(r'', include('django.contrib.staticfiles.urls')),\n) + urlpatterns\n","sub_path":"src/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"297627284","text":"# Copyright 2016 Catalyst IT Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\n\nfrom distil.collector import base\nfrom distil.common import constants\nfrom distil.common import general\nfrom distil.common import openstack\n\nLOG = logging.getLogger(__name__)\nCONF = cfg.CONF\n\n\nclass CeilometerCollector(base.BaseCollector):\n def __init__(self, *arg, **kwargs):\n super(CeilometerCollector, self).__init__(*arg, **kwargs)\n\n self.cclient = openstack.get_ceilometer_client()\n\n @general.disable_ssl_warnings\n def get_meter(self, project_id, meter, start, end):\n \"\"\"Get samples of a particular meter.\n\n Sample example:\n [\n {\n \"id\": \"e04ace6e-2229-11e6-ad16-bc764e068568\",\n \"metadata\": {\n \"name1\": \"value1\",\n \"name2\": \"value2\"\n },\n \"meter\": \"instance\",\n \"project_id\": \"35b17138-b364-4e6a-a131-8f3099c5be68\",\n \"recorded_at\": \"2015-01-01T12:00:00\",\n \"resource_id\": \"bd9431c1-8d69-4ad3-803a-8d4a6b89fd36\",\n \"source\": \"openstack\",\n \"timestamp\": \"2015-01-01T12:00:00\",\n \"type\": \"gauge\",\n \"unit\": \"instance\",\n \"user_id\": \"efd87807-12d2-4b38-9c70-5f5c2ac427ff\",\n \"volume\": 1.0\n }\n ]\n \"\"\"\n query = [\n dict(field='project_id', op='eq', value=project_id),\n dict(field='meter', op='eq', value=meter),\n dict(field='timestamp', op='ge',\n value=start.strftime(constants.date_format)),\n dict(field='timestamp', op='lt',\n value=end.strftime(constants.date_format)),\n ]\n\n sample_objs = self.cclient.new_samples.list(q=query)\n\n # The samples are in descending order by default, should change it to\n # be ascending, making the logic consistent with deprecated code.\n sample_objs.reverse()\n\n return [obj.to_dict() for obj in sample_objs]\n","sub_path":"distil/collector/ceilometer.py","file_name":"ceilometer.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"537131153","text":"from AI.base import *\n\nclass TeamAI(BaseAI):\n def __init__(self, helper):\n self.helper = helper\n self.equipments = [0, 0, 0, 0, 0] # Set the number of your equipments.\n self.color = (255, 255, 255) # Set the color you like.\n\n\n def decide(self):\n my_pos = self.helper.get_player_position()\n radius = self.helper.player_radius\n carry = self.helper.get_player_value()\n nearest_oil_pos = self.helper.get_nearest_oil()\n home = self.helper.get_base_center()\n destination = nearest_oil_pos if carry < 5000 else home\n if destination is None:\n return AI_DIR_STOP\n else:\n direction = (destination[0] - my_pos[0], destination[1] - my_pos[1])\n return self.helper.get_direction(direction)\n","sub_path":"AI/team_default.py","file_name":"team_default.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"218734883","text":"import os\n\nfrom updateaction import UpdateAction\n\n\nclass UploadAction(UpdateAction):\n\n def execute(self):\n while True:\n lpath = input(\"Which directory would you like to upload? (Default: Desktop\\\\lang)\\n\")\n rpath = input(\"Where do you want to upload the directory on the remote server? 
(Default: \"\n \"/home/lars/Skripts/geniusbot/lang)\\n\")\n lpath = lpath if lpath != \"\" else os.path.expanduser(\"~/Desktop\") + \"/lang\"\n rpath = rpath if rpath != \"\" else \"/home/lars/Skripts/geniusbot/lang\"\n\n if os.path.exists(lpath) and os.path.isdir(lpath):\n try:\n if self.con.exists(rpath):\n try:\n with self.con.cd(rpath):\n for file in self.con.listdir():\n self.con.remove(file)\n print(\"Deleted remote \" + file)\n except IOError:\n print(\"Remote path \\\"\" + rpath + \"\\\" is not a directory!\")\n return\n with self.con.cd(rpath):\n for file in self.get_files_from_folder(lpath):\n self.con.put(lpath + \"/\" + file)\n print(\"Uploaded \" + file)\n except FileNotFoundError:\n print(\"Remote path \\\"\" + rpath + \"\\\" does not exist!\")\n return\n break\n else:\n print(\"Local path \\\"\" + lpath + \"\\\" does not exist!\")\n return\n\n print(\"Successfully uploaded folder to remote server\")\n","sub_path":"updater/updatemodes/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"613951340","text":"'''\nSimple tool to write a series of images into a video.\n'''\n\nimport cv2\nfrom image import Image\nfrom multiprocessing import Process\n\ndef write_video(frames, output_file, codec='XVID', fps=30, blocking=True):\n\n def write():\n res = frames[0].shape[:2][::-1]\n fourcc = cv2.cv.CV_FOURCC(*codec)\n out = cv2.VideoWriter(output_file, fourcc, fps, res)\n\n if not out.isOpened():\n raise Exception(\"Could not open video writer for file {0}\".format(output_file))\n\n for frame in frames:\n if isinstance(frame, Image):\n out.write(frame.raw_data)\n else:\n out.write(frame)\n\n out.release()\n\n if blocking:\n write()\n else:\n p = Process(target=write)\n p.start()\n","sub_path":"perception/video_writer.py","file_name":"video_writer.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"635262898","text":"import cv2 as cv\r\nimport sys\r\nimg_src = \"pic.png\"\r\nimg = cv.imread(img_src,cv.IMREAD_UNCHANGED)\r\ncv.imshow(\"Display\",img)\r\nk = cv.waitKey(0)\r\nif k==ord(\"s\"):\r\n cv.destroyAllWindows()\r\n \r\n\r\n","sub_path":"ldy/1014作业/Homework.py","file_name":"Homework.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"314949502","text":"import argparse\nfrom os.path import join, exists\nimport json\nimport functools\n\nimport pprint\npp = pprint.PrettyPrinter(indent=4)\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom models.enc_dec.dSprites import Encoder_1Konny, Decoder_1Konny, \\\n DiscriminatorZ_1Konny\nfrom models.generative.aae import AAE\n\nfrom my_utils.python_utils.general import make_dir_if_not_exist, remove_dir_if_exist, print_both\nfrom my_utils.python_utils.training import iterate_data\nfrom my_utils.python_utils.general import one_hot\nfrom my_utils.tensorflow_utils.training.helper import SimpleTrainHelper, SimpleParamPrinter\n\nfrom utils.general import normal_density, at_bin\nfrom global_settings import RAW_DATA_DIR\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--output_dir', type=str, required=True)\nparser.add_argument('--load_step', type=int, default=-1)\n\nparser.add_argument('--save_dir', type=str, required=True)\nparser.add_argument('--num_bins', type=int, default=100)\nparser.add_argument('--bin_limits', 
type=str, default=\"-4;4\")\nparser.add_argument('--data_proportion', type=float, default=1.0)\n\n\ndef main(args):\n # =====================================\n # Load config\n # =====================================\n with open(join(args.output_dir, 'config.json')) as f:\n config = json.load(f)\n args.__dict__.update(config)\n\n # =====================================\n # Dataset\n # =====================================\n data_file = join(RAW_DATA_DIR, \"ComputerVision\", \"dSprites\",\n \"dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz\")\n\n # It is already in the range [0, 1]\n with np.load(data_file, encoding=\"latin1\") as f:\n x_train = f['imgs']\n # 3 shape * 6 scale * 40 rotation * 32 pos X * 32 pos Y\n y_train = f['latents_classes']\n\n x_train = np.expand_dims(x_train.astype(np.float32), axis=-1)\n num_train = len(x_train)\n print(\"num_train: {}\".format(num_train))\n\n print(\"y_train[:10]: {}\".format(y_train[:10]))\n\n # =====================================\n # Instantiate model\n # =====================================\n if args.enc_dec_model == \"1Konny\":\n encoder = Encoder_1Konny(args.z_dim, stochastic=True)\n decoder = Decoder_1Konny()\n disc_z = DiscriminatorZ_1Konny()\n else:\n raise ValueError(\"Do not support enc_dec_model='{}'!\".format(args.enc_dec_model))\n\n model = AAE([64, 64, 1], args.z_dim,\n encoder=encoder, decoder=decoder,\n discriminator_z=disc_z,\n rec_x_mode=args.rec_x_mode,\n stochastic_z=args.stochastic_z,\n use_gp0_z=True, gp0_z_mode=args.gp0_z_mode)\n\n loss_coeff_dict = {\n 'rec_x': args.rec_x_coeff,\n 'G_loss_z1_gen': args.G_loss_z1_gen_coeff,\n 'D_loss_z1_gen': args.D_loss_z1_gen_coeff,\n 'gp0_z': args.gp0_z_coeff,\n }\n\n model.build(loss_coeff_dict)\n SimpleParamPrinter.print_all_params_tf_slim()\n\n # =====================================\n # Load model\n # =====================================\n config_proto = tf.ConfigProto(allow_soft_placement=True)\n config_proto.gpu_options.allow_growth = True\n config_proto.gpu_options.per_process_gpu_memory_fraction = 0.9\n sess = tf.Session(config=config_proto)\n\n model_dir = make_dir_if_not_exist(join(args.output_dir, \"model_tf\"))\n train_helper = SimpleTrainHelper(log_dir=None, save_dir=model_dir)\n\n # Load model\n train_helper.load(sess, load_step=args.load_step)\n\n # =====================================\n # Experiments\n save_dir = make_dir_if_not_exist(join(args.save_dir, \"{}_{}\".format(\n args.enc_dec_model, args.run)))\n # =====================================\n\n np.set_printoptions(threshold=np.nan, linewidth=1000, precision=5, suppress=True)\n\n num_bins = args.num_bins\n bin_limits = tuple([float(s) for s in args.bin_limits.split(\";\")])\n data_proportion = args.data_proportion\n num_data = int(data_proportion * num_train)\n assert num_data == num_train, \"For dSprites, you must use all data!\"\n eps = 1e-8\n\n # file\n f = open(join(save_dir, 'log[bins={},bin_limits={},data={}].txt'.\n format(num_bins, bin_limits, data_proportion)), mode='w')\n\n # print function\n print_ = functools.partial(print_both, file=f)\n\n print_(\"num_bins: {}\".format(num_bins))\n print_(\"bin_limits: {}\".format(bin_limits))\n print_(\"data_proportion: {}\".format(data_proportion))\n\n # Compute bins\n # ================================= #\n print_(\"\")\n print_(\"bin_limits: {}\".format(bin_limits))\n assert len(bin_limits) == 2 and bin_limits[0] < bin_limits[1], \"bin_limits={}\".format(bin_limits)\n\n bins = np.linspace(bin_limits[0], bin_limits[1], num_bins + 1, endpoint=True)\n print_(\"bins: 
{}\".format(bins))\n assert len(bins) == num_bins + 1\n\n bin_widths = [bins[b] - bins[b - 1] for b in range(1, len(bins))]\n print_(\"bin_widths: {}\".format(bin_widths))\n assert len(bin_widths) == num_bins, \"len(bin_widths)={} while num_bins={}!\".format(len(bin_widths), num_bins)\n assert np.all(np.greater(bin_widths, 0)), \"bin_widths: {}\".format(bin_widths)\n\n bin_centers = [(bins[b] + bins[b - 1]) * 0.5 for b in range(1, len(bins))]\n print_(\"bin_centers: {}\".format(bin_centers))\n assert len(bin_centers) == num_bins, \"len(bin_centers)={} while num_bins={}!\".format(len(bin_centers), num_bins)\n # ================================= #\n\n # Compute representations\n # ================================= #\n z_data_file = join(save_dir, \"z_data[data={}].npz\".format(data_proportion))\n\n if not exists(z_data_file):\n all_z_mean = []\n all_z_stddev = []\n\n print(\"\")\n print(\"Compute all_z_mean, all_z_stddev and all_attrs!\")\n\n count = 0\n for batch_ids in iterate_data(num_data, 10 * args.batch_size, shuffle=False):\n x = x_train[batch_ids]\n\n z_mean, z_stddev = sess.run(\n model.get_output(['z_mean', 'z_stddev']),\n feed_dict={model.is_train: False, model.x_ph: x})\n\n all_z_mean.append(z_mean)\n all_z_stddev.append(z_stddev)\n\n count += len(batch_ids)\n print(\"\\rProcessed {} samples!\".format(count), end=\"\")\n print()\n\n all_z_mean = np.concatenate(all_z_mean, axis=0)\n all_z_stddev = np.concatenate(all_z_stddev, axis=0)\n\n np.savez_compressed(z_data_file, all_z_mean=all_z_mean,\n all_z_stddev=all_z_stddev)\n else:\n print(\"{} exists. Load data from file!\".format(z_data_file))\n with np.load(z_data_file, \"r\") as f:\n all_z_mean = f['all_z_mean']\n all_z_stddev = f['all_z_stddev']\n # ================================= #\n\n print_(\"\")\n all_Q_z_cond_x = []\n for i in range(args.z_dim):\n print_(\"\\nCompute all_Q_z{}_cond_x!\".format(i))\n\n all_Q_s_cond_x = []\n for batch_ids in iterate_data(len(all_z_mean), 500, shuffle=False, include_remaining=True):\n # (batch_size, num_bins)\n q_s_cond_x = normal_density(np.expand_dims(bin_centers, axis=0),\n mean=np.expand_dims(all_z_mean[batch_ids, i], axis=-1),\n stddev=np.expand_dims(all_z_stddev[batch_ids, i], axis=-1))\n\n # (batch_size, num_bins)\n max_q_s_cond_x = np.max(q_s_cond_x, axis=-1)\n # print(\"\\nmax_q_s_cond_x: {}\".format(np.sort(max_q_s_cond_x)))\n\n # (batch_size, num_bins)\n deter_s_cond_x = at_bin(all_z_mean[batch_ids, i], bins).astype(np.float32)\n\n # (batch_size, num_bins)\n Q_s_cond_x = q_s_cond_x * np.expand_dims(bin_widths, axis=0)\n Q_s_cond_x = Q_s_cond_x / np.maximum(np.sum(Q_s_cond_x, axis=1, keepdims=True), eps)\n # print(\"sort(sum(Q_s_cond_x)) (before): {}\".format(np.sort(np.sum(Q_s_cond_x, axis=-1))))\n\n Q_s_cond_x = np.where(np.expand_dims(np.less(max_q_s_cond_x, 1e-5), axis=-1),\n deter_s_cond_x, Q_s_cond_x)\n # print(\"sort(sum(Q_s_cond_x)) (after): {}\".format(np.sort(np.sum(Q_s_cond_x, axis=-1))))\n\n all_Q_s_cond_x.append(Q_s_cond_x)\n\n # (num_samples, num_bins)\n all_Q_s_cond_x = np.concatenate(all_Q_s_cond_x, axis=0)\n assert np.all(all_Q_s_cond_x >= 0), \"'all_Q_s_cond_x' contains negative values. 
\" \\\n \"sorted_all_Q_s_cond_x[:30]:\\n{}!\".format(np.sort(all_Q_s_cond_x[:30], axis=None))\n assert len(all_Q_s_cond_x) == num_train\n\n all_Q_z_cond_x.append(all_Q_s_cond_x)\n\n # (z_dim, num_samples, num_bins)\n all_Q_z_cond_x = np.asarray(all_Q_z_cond_x, dtype=np.float32)\n print_(\"all_Q_z_cond_x.shape: {}\".format(all_Q_z_cond_x.shape))\n print_(\"sum(all_Q_z_cond_x)[:, :10]:\\n{}\".format(np.sum(all_Q_z_cond_x, axis=-1)[:, :10]))\n\n # (z_dim, num_bins)\n Q_z = np.mean(all_Q_z_cond_x, axis=1)\n log_Q_z = np.log(np.clip(Q_z, eps, 1-eps))\n print_(\"Q_z.shape: {}\".format(Q_z.shape))\n print_(\"sum(Q_z): {}\".format(np.sum(Q_z, axis=-1)))\n\n # (z_dim, )\n H_z = -np.sum(Q_z * log_Q_z, axis=-1)\n\n # Factors\n gt_factors = ['shape', 'scale', 'rotation', 'pos_x', 'pos_y']\n gt_num_values = [3, 6, 40, 32, 32]\n\n MI_z_y = np.zeros([args.z_dim, len(gt_factors)], dtype=np.float32)\n H_z_y = np.zeros([args.z_dim, len(gt_factors)], dtype=np.float32)\n\n ids_sorted = np.zeros([args.z_dim, len(gt_factors)], dtype=np.int32)\n MI_z_y_sorted = np.zeros([args.z_dim, len(gt_factors)], dtype=np.float32)\n H_z_y_sorted = np.zeros([args.z_dim, len(gt_factors)], dtype=np.float32)\n\n H_y = []\n RMIG = []\n JEMMI = []\n\n for k, (factor, num_values) in enumerate(zip(gt_factors, gt_num_values)):\n print_(\"\\n#\" + \"=\" * 50 + \"#\")\n print_(\"The {}-th gt factor '{}' has {} values!\".format(k, factor, num_values))\n\n print_(\"\")\n # (num_samples, num_categories)\n # NOTE: We must use k+1 to account for the 'color' attribute, which is always white\n all_Q_yk_cond_x = one_hot(y_train[:, k+1], num_categories=num_values, dtype=np.float32)\n print_(\"all_Q_yk_cond_x.shape: {}\".format(all_Q_yk_cond_x.shape))\n\n # (num_categories)\n Q_yk = np.mean(all_Q_yk_cond_x, axis=0)\n log_Q_yk = np.log(np.clip(Q_yk, eps, 1-eps))\n print_(\"Q_yk.shape: {}\".format(Q_yk.shape))\n\n H_yk = -np.sum(Q_yk * log_Q_yk)\n print_(\"H_yk: {}\".format(H_yk))\n H_y.append(H_yk)\n\n Q_z_yk = np.zeros([args.z_dim, num_bins, num_values], dtype=np.float32)\n\n # Compute I(zi, yk)\n for i in range(args.z_dim):\n print_(\"\\n#\" + \"-\" * 50 + \"#\")\n all_Q_zi_cond_x = all_Q_z_cond_x[i]\n assert len(all_Q_zi_cond_x) == len(all_Q_yk_cond_x) == num_train, \\\n \"all_Q_zi_cond_x.shape: {}, all_Q_yk_cond_x.shape: {}\".format(\n all_Q_zi_cond_x.shape, all_Q_yk_cond_x.shape)\n\n # (num_bins, num_categories)\n Q_zi_yk = np.matmul(np.transpose(all_Q_zi_cond_x, axes=[1, 0]), all_Q_yk_cond_x)\n Q_zi_yk = Q_zi_yk / num_train\n print_(\"np.sum(Q_zi_yk): {}\".format(np.sum(Q_zi_yk)))\n Q_zi_yk = Q_zi_yk / np.maximum(np.sum(Q_zi_yk), eps)\n print_(\"np.sum(Q_zi_yk) (normalized): {}\".format(np.sum(Q_zi_yk)))\n\n assert np.all(Q_zi_yk >= 0), \"'Q_zi_yk' contains negative values. 
\" \\\n \"sorted_Q_zi_yk[:10]:\\n{}!\".format(np.sort(Q_zi_yk, axis=None))\n\n # (num_bins, num_categories)\n log_Q_zi_yk = np.log(np.clip(Q_zi_yk, eps, 1 - eps))\n\n print_(\"\")\n print_(\"Q_zi (default): {}\".format(Q_z[i]))\n print_(\"Q_zi (sum of Q_zi_yk over yk): {}\".format(np.sum(Q_zi_yk, axis=-1)))\n\n print_(\"\")\n print_(\"Q_yk (default): {}\".format(Q_yk))\n print_(\"Q_yk (sum of Q_zi_yk over zi): {}\".format(np.sum(Q_zi_yk, axis=0)))\n\n MI_zi_yk = Q_zi_yk * (log_Q_zi_yk -\n np.expand_dims(log_Q_z[i], axis=-1) -\n np.expand_dims(log_Q_yk, axis=0))\n\n MI_zi_yk = np.sum(MI_zi_yk)\n H_zi_yk = -np.sum(Q_zi_yk * log_Q_zi_yk)\n\n Q_z_yk[i] = Q_zi_yk\n MI_z_y[i, k] = MI_zi_yk\n H_z_y[i, k] = H_zi_yk\n\n print_(\"#\" + \"-\" * 50 + \"#\")\n\n # Print statistics for all z\n print_(\"\")\n print_(\"MI_z_yk:\\n{}\".format(MI_z_y[:, k]))\n print_(\"H_z_yk:\\n{}\".format(H_z_y[:, k]))\n print_(\"H_z:\\n{}\".format(H_z))\n print_(\"H_yk:\\n{}\".format(H_yk))\n\n # Compute RMIG and JEMMI\n ids_yk_sorted = np.argsort(MI_z_y[:, k], axis=0)[::-1]\n MI_z_yk_sorted = np.take_along_axis(MI_z_y[:, k], ids_yk_sorted, axis=0)\n H_z_yk_sorted = np.take_along_axis(H_z_y[:, k], ids_yk_sorted, axis=0)\n\n RMIG_yk = np.divide(MI_z_yk_sorted[0] - MI_z_yk_sorted[1], H_yk)\n JEMMI_yk = np.divide(H_z_yk_sorted[0] - MI_z_yk_sorted[0] + MI_z_yk_sorted[1],\n H_yk + np.log(num_bins))\n\n ids_sorted[:, k] = ids_yk_sorted\n MI_z_y_sorted[:, k] = MI_z_yk_sorted\n H_z_y_sorted[:, k] = H_z_yk_sorted\n\n RMIG.append(RMIG_yk)\n JEMMI.append(JEMMI_yk)\n\n print_(\"\")\n print_(\"ids_sorted: {}\".format(ids_sorted))\n print_(\"MI_z_yk_sorted: {}\".format(MI_z_yk_sorted))\n print_(\"RMIG_yk: {}\".format(RMIG_yk))\n print_(\"JEMMI_yk: {}\".format(JEMMI_yk))\n\n z_yk_prob_file = join(save_dir, \"z_yk_prob_4_{}[bins={},bin_limits={},data={}].npz\".\n format(factor, num_bins, bin_limits, data_proportion))\n np.savez_compressed(z_yk_prob_file, Q_z_yk=Q_z_yk)\n print_(\"#\" + \"=\" * 50 + \"#\")\n\n results = {\n \"MI_z_y\": MI_z_y,\n \"H_z_y\": H_z_y,\n \"ids_sorted\": ids_sorted,\n \"MI_z_y_sorted\": MI_z_y_sorted,\n \"H_z_y_sorted\": H_z_y_sorted,\n \"H_z\": H_z,\n \"H_y\": np.asarray(H_y, dtype=np.float32),\n \"RMIG\": np.asarray(RMIG, dtype=np.float32),\n \"JEMMI\": np.asarray(JEMMI, dtype=np.float32),\n }\n result_file = join(save_dir, \"results[bins={},bin_limits={},data={}].npz\".\n format(num_bins, bin_limits, data_proportion))\n np.savez_compressed(result_file, **results)\n\n f.close()\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n main(args)\n","sub_path":"working/disentanglement/dSprites/AAE/exp_4_paper/7a_interpretability_metrics_v2.py","file_name":"7a_interpretability_metrics_v2.py","file_ext":"py","file_size_in_byte":14455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"431081262","text":"# coding:utf-8\n\n# 一本图书是一个字典,将图书列表按图书分类归类\ndef do(books):\n # 分类集合\n book_types=set(book['分类'] for book in books)\n print(book_types)\n result=[]\n for book_type in book_types:\n gronp=[]\n for book in books:\n if book['分类'] == book_type:\n gronp.append(book)\n result.append(gronp)\n return result\n\nif __name__ == '__main__':\n books=[\n {'分类':'科幻','书名':'三体'},\n {'分类':'IT','书名':'Python编程'},\n {'分类':'科幻','书名':'遗落的南境'},\n {'分类':'IT','书名':'Java编程思想'},\n {'分类':'IT','书名':'算法导论'},\n {'分类':'科幻','书名':'星际大战'},\n {'分类':'科幻','书名':'朝闻道'},\n {'分类':'IT','书名':'Python3网络爬虫'},\n ]\n result=do(books)\n for i in result:\n 
print(i)","sub_path":"编程挑战/将字典列表中的数据按某个键进行分类.py","file_name":"将字典列表中的数据按某个键进行分类.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"181608538","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom collections import Counter\n\ndef get_message(path): \n messages = [list(line.rstrip()) for line in open(path)]\n return ''.join(Counter(c).most_common()[0][0] for c in zip(*messages))\n\nif __name__ == '__main__':\n print('Message =', get_message('input.txt'))\n\n","sub_path":"6/repetition_1.py","file_name":"repetition_1.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"545916472","text":"from flask import Flask, render_template\nfrom utils import get_data, prepare_str\n\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef get_home_page():\n return render_template(\"home.html\", data=get_data())\n\n\n@app.route(\"/author\")\ndef get_author():\n dict_of_link = {prepare_str(x.get('title')): x.get('title') for x in get_data()}\n return render_template(\"author.html\", dict_of_link=dict_of_link)\n\n\n@app.route('/')\ndef get_page(page_name):\n obj = prepare_str(page_name)\n obj_text = [x.get('text') for x in get_data() if prepare_str(x.get('title')) == obj]\n dict_of_link = {prepare_str(x.get('title')): x.get('title') for x in get_data()}\n if obj_text:\n return render_template(\"base_page.html\", title=obj, text=obj_text[0], dict_of_link=dict_of_link)\n else:\n return render_template(\"error_page.html\", title=page_name, dict_of_link=dict_of_link)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"flask_lesson_intro/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"90782214","text":"from io import StringIO\nfrom lxml import etree\nimport requests\n\ndef parse(url):\n res = requests.get(url)\n parser = etree.HTMLParser()\n tree = etree.parse(StringIO(res.text), parser)\n root = tree.getroot()\n\n # Get body.\n try:\n body = root.getchildren()[1]\n container = body.getchildren()[1].getchildren()\n except Exception:\n return []\n elements = list(filter(lambda x: x.attrib.get(\"class\") == \"hentry member_contribution\", container))\n quotes = []\n for e in elements:\n try:\n cite = e.getchildren()[1].getchildren()[1]\n name = cite.getchildren()[0].text\n except Exception:\n continue\n if \"Prime Minister\" in name:\n try:\n text = \"\"\n paragraphs = list(filter(lambda x: x.tag == \"p\", e.getchildren()[1].getchildren()))\n for p in paragraphs:\n for part in p.xpath(\"child::node()\"):\n try:\n if isinstance(part, str):\n text += part.lstrip().rstrip() + \" \"\n elif part.tag == \"q\":\n print(part.text)\n text += part.text.lstrip().rstrip()\n except Exception:\n continue\n except Exception:\n continue\n quotes.append(text)\n return quotes\n\n","sub_path":"dkc.py","file_name":"dkc.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"174743933","text":"import socket # allows establish connection over internet\nfrom IPy import IP\nimport threading\nfrom queue import Queue\nimport time\nfrom datetime import datetime\n\n# testing port scanner by using testphp.vulnweb.com website\ndef DNS(ip):\n try:\n IP(ip)\n return ip\n except ValueError: # if user input is domain 
name, find ip address\n return socket.gethostbyname(ip)\n\ndef get_banner(s):\n return s.recv(1024)\n\ndef scan_port(ipaddress, port):\n try:\n # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # socket.setdefaulttimeout(1) \n s = socket.socket()\n s.settimeout(0.5)\n s.connect((ipaddress, port)) # socket try to connect to ip_address:port\n \n try:\n banner = get_banner(s)\n print(f'[+] Open Port {port} : {banner}')\n except:\n print(f'[+] Open Port {port}')\n \n except:\n pass # pass if port is closed\n\ntargets_count = 0\ndef scan_target(target, port_num):\n global targets_count # access varaible that is declared in the outer scope\n ip = DNS(target)\n \n print(f'\\n[Scanning Target {targets_count}] {target}')\n targets_count += 1\n \n # scan_threads(ip, port_num)\n for port in range(1, int(port_num)):\n scan_port(ip, port)\n\ndef scan(targets, port_num):\n print(f'\\nScanning started at: {datetime.now()}')\n\n if ',' in targets:\n for ip_add in targets.split(','):\n scan_target(ip_add.strip(' '), port_num)\n else:\n scan_target(targets, port_num)\n\n print(f'\\nScanning finished at: {datetime.now()}')\n\n# # To prevent \"double\" modification of shared vairable. (race condition)\n# lock = threading.Lock()\n# q = Queue()\n\n# # thread is running by getting process from queue\n# def threader(targets):\n# while True:\n# # gets thread from the queue\n# thread_port = q.get()\n\n# # Run the sacn\n# scan_port(targets, thread_port)\n \n# q.task_done()\n\n# def scan_threads(targets, port_num):\n# # how many threads are allowed\n# for x in range(100):\n# t = threading.Thread(target=threader, args=(targets))\n\n# # classifying as a daemon, so they will die when the main dies\n# t.daemon = True\n\n# # begins, must come after daemon definition\n# t.start()\n\n# # 100 jobs assigned\n# for port in range(1, port_num):\n# q.put(port)\n\n# # wait until the thread terminates.\n# q.join()\n\n# only run portScanner.py is running. 
\n# if it is imported from other file, it will not execute\nif __name__ == \"__main__\": \n targets = input('[+] Enter Target/s To Scan (split multiple targets with ,): ')\n port_num = input('Enter Number Of Ports To Scan (Scan starts from port 1): ')\n\n scan(targets, port_num)\n\n","sub_path":"python/simple_Port_Scanner/portScanner.py","file_name":"portScanner.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"552338426","text":"from django.conf.urls.defaults import patterns, include, url\n\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n\turl(r'^', include('django_boilerplate.sample_app.urls')),\n\n # url(r'^admin/', include(admin.site.urls)),\n)\n\n# Reference\n# http://docs.python.org/library/re.html\n# https://docs.djangoproject.com/en/1.3/topics/http/urls/\n# https://docs.djangoproject.com/en/1.3/ref/class-based-views/","sub_path":"django_boilerplate/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"330464038","text":"\"\"\"\nThe impelmentation of Attention-CNN model≥\n\nAuthor: Haotian Xue\n\"\"\"\n\nimport copy\nimport torch\nimport torch.nn as nn\nfrom sen_tensor_model_class import SenTensorModel\nfrom utils import layers, attention\n\n\nclass AttnCnnModel(SenTensorModel):\n def __init__(self,\n train_data_set,\n test_data_set,\n hyper_parameter,\n train_requirement,\n is_gpu=torch.cuda.is_available(),\n model_save_path=\"../trained_model/self_attn_model.pt\"):\n super(AttnCnnModel, self).__init__(train_data_set,\n test_data_set,\n hyper_parameter,\n train_requirement,\n is_gpu,\n model_save_path)\n self.model = self.build_model()\n if is_gpu:\n self.model = self.model.cuda()\n self.train_test()\n\n def build_model(self):\n d_w, hidden_dim, num_layers, num_heads, window_size, num_filter = self.extract_hyper_parameters()\n print(\"-----Start building model-----\")\n model = AttnCnnModelHelper(d_w=d_w,\n word_emb_weight=torch.from_numpy(self.test_data_set.word_embedding),\n hidden_dim=hidden_dim,\n num_layers=num_layers,\n num_heads=num_heads,\n window_size=window_size,\n num_filter=num_filter)\n print(\"-----Finish building model-----\")\n return model\n\n def extract_hyper_parameters(self):\n return self.hyper_parameter[\"d_w\"], \\\n self.hyper_parameter[\"hidden_dim\"], \\\n self.hyper_parameter[\"num_layers\"], \\\n self.hyper_parameter[\"num_heads\"], \\\n self.hyper_parameter[\"window_size\"], \\\n self.hyper_parameter[\"num_filter\"]\n\n\nclass AttnCnnModelHelper(nn.Module):\n def __init__(self, d_w, hidden_dim, word_emb_weight, num_layers=4,\n num_heads=5, window_size=3, num_filter=128, dropout_p=0.1, num_classes=2):\n super(AttnCnnModelHelper, self).__init__()\n self.w2v = nn.Embedding.from_pretrained(word_emb_weight, freeze=False)\n c = copy.deepcopy\n d_model = d_w\n self_attn = attention.MultiHeadAttention(h=num_heads, d_model=d_model, dropout=dropout_p)\n ff = layers.PositionwiseFeedForward(d_model=d_model, d_ff=hidden_dim, dropout=dropout_p)\n self.attn_layer = nn.Sequential(\n layers.Encoder(layers.EncoderLayer(d_model, c(self_attn), c(ff), dropout_p), num_layers)\n ) # (batch, sen_len, d_model)\n self.cnn_layer = nn.Sequential(\n nn.Conv2d(in_channels=1,\n out_channels=num_filter,\n kernel_size=(window_size, d_w),\n stride=(1, 1),\n padding=(1, 0)), # out_shape: (batch_size, num_filter, max_sen_len, 
1)\n nn.MaxPool2d(kernel_size=(150, 1),\n stride=(1, 1)), # out_shape: (batch_size, num_filter, 1, 1)\n nn.Dropout(dropout_p)\n )\n self.linear_layer = nn.Sequential(\n nn.Linear(num_filter, num_filter // 3),\n nn.Tanh(),\n nn.Linear(num_filter // 3, num_classes)\n )\n for p in self.attn_layer.parameters():\n if p.dim() > 1: # dim: 维度数\n nn.init.xavier_uniform_(p)\n for p in self.cnn_layer.parameters():\n if p.dim() > 1: # dim: 维度数\n nn.init.xavier_uniform_(p)\n for p in self.linear_layer.parameters():\n if p.dim() > 1: # dim: 维度数\n nn.init.xavier_uniform_(p)\n\n def forward(self, x): # x: (batch, max_sen_len)\n x = self.w2v(x) # (batch_size, max_sen_len, d_w)\n out = self.attn_layer(x) # (batch_size, max_sen_len, d_w)\n out = torch.unsqueeze(out, dim=1) # (batch_size, 1, max_sen_len, d_w)\n out = self.cnn_layer(out) # (batch_size, num_filter, 1, 1)\n out = out.view(out.shape[0], -1) # (batch_size, num_filter)\n out = self.linear_layer(out) # (batch_size, 2)\n return out\n\n\nif __name__ == \"__main__\":\n from data_fetcher.dataFetcher import SenSemEvalDataSet\n train_requirement = {\"num_epoch\": 20, \"batch_size\": 32, \"lr\": 3e-4}\n hyper_parameter = {\"d_w\": 50, \"hidden_dim\": 128, \"num_layers\": 2, \"num_heads\": 1, \"window_size\": 3, \"num_filter\": 128}\n train_data_set = SenSemEvalDataSet(\"../data/train.txt\", \"../data/word_embedding/glove.6B.50d.txt\", 50, True)\n test_data_set = SenSemEvalDataSet(\"../data/test.txt\", \"../data/word_embedding/glove.6B.50d.txt\", 50, 150, True)\n model = AttnCnnModel(train_data_set, test_data_set, hyper_parameter, train_requirement)\n\n","sub_path":"model/AttnCnn_model.py","file_name":"AttnCnn_model.py","file_ext":"py","file_size_in_byte":4952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"575346458","text":"from bs4 import BeautifulSoup\nfrom urllib2 import Request,urlopen\nfrom numpy import array,unique,arange,random\nfrom readability.readability import Document\nimport re\nfrom stemming.porter2 import stem\n\nglobal cwords\ncw = open(\"cwords2\",'r')\ncwords = cw.read()\ncwords = cwords.split()\n\nurl = \"http://www.technologyreview.com/news/532826/material-cools-buildings-by-sending-heat-into-space/\"\n\ndef retrieveHtml(url):\n '''open url and retrieve html'''\n headers = { 'User-Agent' : 'Mozilla/5.0' }\n req = Request(url, None, headers)\n html = urlopen(req).read()\n \n return html\n\ndef cleanHtmlToText(html):\n '''clean html and return list of words to make it brainspeed readable'''\n title = Document(html).short_title()\n html = Document(html).summary()\n soup = BeautifulSoup(html)\n \n for script in soup([\"script\", \"style\"]):\n script.extract() # rip it out\n \n text = soup.get_text()\n \n dicText = {'text': text , 'title' : title}\n \n return dicText\n \ndef prepareText(dicText):\n text = dicText['text']\n wordListRead = text.split()\n text = re.sub(\"[,.']\",\" \",text)\n text = text.lower()\n \n wordList = [x for x in wordListRead if x.isalpha()]\n \n try:\n title = dicText['title']\n except:\n title = \" \".join(wordList[:10])\n \n return {'wordList' : wordList , 'title' : title,'wordListRead' : wordListRead}\n\n\ndef removeCwords(wordList):\n '''remove common words'''\n return [x for x in wordList if x not in cwords]\n\n\ndef selectWords(wordList,n,countBoundaries=[0,5],lenBoundaries=[5,100]):\n '''select words for recall questions with two possible right answers: \n (i) exact word\n (ii) root word (e.g., radiates => radiat)\n '''\n \n uniq = unique(wordList)\n 
count = array([wordList.count(x) for x in uniq])\n countDic = dict(zip(*[uniq,count]))\n \n stemWdl = [stem(w) for w in wordList]\n uStemWdl = [stem(w) for w in uniq]\n \n countStemWdl = [stemWdl.count(x) for x in uStemWdl]\n\n uStemDic = dict(zip(*[uniq,uStemWdl])) \n stemCountDic = dict(zip(*[uStemWdl,countStemWdl]))\n \n lengths = array([len(i) for i in uniq])\n cCount = (count >= countBoundaries[0])*(count <= countBoundaries[1])\n cLengths = (lengths >= lenBoundaries[0])*(lengths <= lenBoundaries[1])\n uWord = uniq[cCount*cLengths]\n count = count[cCount*cLengths]\n index = arange(len(uWord))\n random.shuffle(index)\n index = index[:n]\n \n uCountStem = []\n \n dic = {}\n for i in index:\n uw = uWord[i]\n #print i,uw,count[i],uStemDic[uw],stemCountDic[uStemDic[uw]]\n dic[uw] = list([count[i].item(),stemCountDic[uStemDic[uw]]])\n \n uCountStem.append(stemCountDic[uStemDic[uw]])\n \n #return uWord[index],count[index],uCountStem\n return dic\n\n\n\n","sub_path":"textPrepare.py","file_name":"textPrepare.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"228043741","text":"from matplotlib import pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.python.ops import parsing_ops\n\nimport numpy as np\nimport skimage.io as io\nimport util\n\ndata_files = [util.io.get_absolute_path('~/dataset_nfs/FSNS/train-00001-of-00512')]\nfilename_queue = tf.train.string_input_producer(\n data_files, capacity=100)\nreader = tf.TFRecordReader()\n_, example_serialized = reader.read(filename_queue)\nexample_serialized = tf.reshape(example_serialized, shape=[])\nfeatures = tf.parse_single_example(\n example_serialized,\n {'image/encoded': parsing_ops.FixedLenFeature(\n [1], dtype=tf.string, default_value=''),\n 'image/text': parsing_ops.FixedLenFeature(\n [1], dtype=tf.string, default_value=''),\n 'image/class': parsing_ops.VarLenFeature(dtype=tf.int64),\n 'image/unpadded_class': parsing_ops.VarLenFeature(dtype=tf.int64),\n 'image/height': parsing_ops.FixedLenFeature(\n [1], dtype=tf.int64, default_value=1),\n 'image/width': parsing_ops.FixedLenFeature(\n [1], dtype=tf.int64, default_value=1)})\n#labels = features['image/unpadded_class']\nlabels = features['image/class']\nlabels = tf.serialize_sparse(labels)\nimage_buffer = tf.reshape(features['image/encoded'], shape=[], name='encoded')\nimage = tf.image.decode_png(image_buffer, channels=3)\n\nheight = tf.reshape(features['image/height'], [-1])\nwidth = tf.reshape(features['image/width'], [-1])\ntext = tf.reshape(features['image/text'], shape=[])\n\nimages_and_label_lists = [[image, height, width, labels, text]]\n\nimages, heights, widths, labels, texts = tf.train.batch_join(\n images_and_label_lists,\n batch_size=1,\n capacity=16 * 10,\n dynamic_pad=True)\nwith tf.Session() as sess:\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord) \n data = []\n for i in range(100):\n imgs, txts= sess.run([images, texts])\n #util.img.imwrite('~/temp_nfs/no-use/fsns/%d.jpg'%(i), imgs[0, ...], rgb = True)\n image_data = imgs[0, ...]\n image_data = np.transpose(image_data, [0, 2, 1]);\n image_data = np.reshape(image_data, [150, 3, 4, 150])\n image_data = np.transpose(image_data, [2, 0, 3, 1])\n for idx, I in enumerate(image_data):\n# util.cit(I)\n util.img.imwrite('~/temp_nfs/no-use/fsns/%d_%d.jpg'%(i, idx), I, rgb = True)\n coord.request_stop() \n 
coord.join(threads)\n","sub_path":"street/python-upgraded/gen_jpg.py","file_name":"gen_jpg.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"447436945","text":"from docutils.core import publish_parts\nfrom jinja2 import Template\nfrom jinja2 import Environment, PackageLoader, select_autoescape, Markup\nfrom pathlib import Path\nimport json\nimport yaml\nfrom pathlib import Path\nfrom collections import OrderedDict\nimport os\n\n# All hail SO: https://stackoverflow.com/questions/11309885/jinja2-restructured-markup\ndef rst_filter(s):\n if not s: return ''\n if len(s) == 0:\n return ''\n return Markup(publish_parts(source=s, writer_name='html')['body'])\n\ndef max_type(value):\n type_map = {\n 'float':'float64',\n 'long': 'int',\n 'buffer':'symbol',\n 'enum':'int',\n 'symbol':'symbol',\n 'fft': 'int', \n 'dataset': 'symbol',\n 'labelset': 'symbol'\n }\n # print(value)\n return type_map[value] if value in type_map else 'UNKOWN({})'.format(value)\n # return \"atype\"\n\ndef truefalse(value):\n if value is True: return \"1\"\n if value is False: return \"0\" \n\n\n\n\n\ndef process_client(env, jsonfile):\n print(jsonfile.stem.lower())\n template = env.get_template('maxref.xml')\n data = json.load(open(jsonfile.resolve()))\n human_data = {}\n human_data_path = Path('../doc/'+jsonfile.stem+'.yaml')\n if(human_data_path.exists()):\n human_data = yaml.load(open(human_data_path.resolve(),encoding='utf8'))\n # print(human_data['digest'])\n\n args={}\n attrs={}\n\n # data = dict(data) #data is in json array to preserve order,\n \n if not data: return \n # print(data)\n params = dict(data['params'])\n \n # print(params)\n params['warnings'] = {\n \"displayName\" : \"Warnings\",\n \"constraints\": {\n \"min\": 0,\n \"max\": 1\n } ,\n \"default\": 0,\n \"type\": \"long\",\n \"size\": 1,\n \"fixed\": False,\n \"description\" : \"Enable warnings to be issued whenever a parameter value is contrained (e.g. clipped)\"\n }\n\n if jsonfile.stem.lower().startswith('buf'):\n params['blocking'] = {\n \"displayName\" : \"Blocking Mode\",\n \"default\": 1,\n \"fixed\": False,\n \"size\": 1,\n \"type\": \"enum\",\n \"values\": [\n \"Non-Blocking\",\n \"Blocking (Low Priority)\",\n \"Blocking (High Priority)\"\n ],\n \"enum\": {\n \"Non-Blocking\": \"Processing runs in a worker thread\",\n \"Blocking (Low Priority)\" : \"Processing runs in the main application thread\",\n \"Blocking (High Priority)\" : \"(Max only) Processing runs in the scheduler thread\"\n },\n \"description\" : \"Set the threading mode for the object\"\n }\n params['queue'] = {\n \"displayName\" : \"Non-Blocking Queue Flag\",\n \"default\": 0,\n \"fixed\": False,\n \"size\": 1,\n \"type\": \"long\",\n \"description\" : \"In non-blocking mode enable jobs to be queued up if successive bangs are sent whilst the object is busy. With the queue disabled, successive bangs will produce a warning. 
When enabled, the object will processing successively, against the state of its parameters when each bang was sent\"\n }\n\n for d,v in params.items():\n # print(d)\n fixed = False;\n # description = ''\n\n param = {}\n\n param.update({d.lower():v})\n \n # if(d): print(d)\n # if(human_data and human_data['parameters']): print(\"I haz yaml\")\n\n if human_data and human_data['parameters'] and d in human_data['parameters']:\n if 'description' in human_data['parameters'][d]:\n param[d.lower()].update({'description': human_data['parameters'][d]['description']})\n if 'enum' in human_data['parameters'][d] and 'values' in v:\n param[d.lower()]['enum'] = dict(zip(v['values'],human_data['parameters'][d]['enum'].values()))\n\n\n if human_data and human_data['parameters'] and d == 'fftSettings':\n fftdesc ='FFT settings consist of three numbers representing the window size, hop size and FFT size:\\n'\n if 'windowSize' in human_data['parameters']:\n fftdesc += ' \\n* ' + human_data['parameters']['windowSize']['description'];\n if 'hopSize' in human_data['parameters']:\n fftdesc += ' \\n* ' + human_data['parameters']['hopSize']['description'];\n if 'fftSize' in human_data['parameters']:\n fftdesc += ' \\n* ' + human_data['parameters']['fftSize']['description'];\n fftdesc += '\\n'\n param[d.lower()].update({'description': fftdesc})\n\n\n if 'fixed' in v:\n fixed = v['fixed']\n if fixed:\n args.update(param)\n else:\n attrs.update(param)\n \n messages = dict(data['messages'])\n\n for d,v in messages.items():\n if human_data and human_data['messages'] and d in human_data['messages']:\n if 'description' in human_data['messages'][d]:\n messages[d].update({'description': human_data['messages'][d]['description']})\n\n # print(args)\n digest = human_data['digest'] if 'digest' in human_data else 'A Fluid Decomposition Object'\n description = human_data['description'] if 'description' in human_data else ''\n discussion = human_data['discussion'] if 'discussion' in human_data else ''\n client = 'fluid.{}~'.format(jsonfile.stem.lower())\n attrs = OrderedDict(sorted(attrs.items(), key=lambda t: t[0]))\n with open('../maxref/{}.maxref.xml'.format(client),'w',encoding='utf8') as f:\n f.write(template.render(\n arguments=args,\n attributes=attrs,\n messages=messages,\n client_name=client,\n digest=digest,\n description=description,\n discussion=discussion\n ))\n\n #Also return a dictionary summarizing the object for obj-qlookup.json\n objLookupEntry = {\n 'digest': digest,\n 'module':'fluid decomposition',\n 'keywords':[],\n 'category': [],\n 'seealso': []\n }\n\n return objLookupEntry;\n\n\ndef main():\n env = Environment(\n loader=PackageLoader('MakeMaxRef', 'templates'),\n autoescape=select_autoescape(['html', 'xml'])\n )\n env.filters['maxtype'] = max_type\n env.filters['rst'] = rst_filter\n env.filters['truefalse'] = truefalse\n p = Path('../json')\n clients = list(p.glob('**/*.json'))\n out = Path('../maxref')\n out.mkdir(exist_ok=True)\n for c in clients:\n process_client(env, c)\n # process_client(env, Path('../json/NMFFilter.json'))\n\nif __name__== \"__main__\":\n main()\n\n# print(clients)\n\n# print(template.render(client_name='AClient'))\n","sub_path":"script/MakeMaxRef.py","file_name":"MakeMaxRef.py","file_ext":"py","file_size_in_byte":6702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"592460978","text":"# Copyright (C) 2020, M.B.Moessner\n#\n# SPDX-License-Identifier: Apache-2.0 \n#\n\nimport argparse\nfrom scripts.Utils import *\n\nCTNG_BIN_DIR 
= os.getcwd() + \"/ctng\"\nCTNG_BINARY = os.getcwd() + \"/ctng/bin/ct-ng\" \nCTNG_REPO = \"https://github.com/crosstool-ng/crosstool-ng\"\nCTNG_COMMIT_HASH = \"3f461da11f1f8e9dcfdffef24e1982b5ffd10305\"\n\n\ndef apply_patches(topDir, srcDir, patchDir):\n\n patches = sorted(glob.glob(patchDir + \"/*.patch\"))\n\n for patch in patches:\n logger.info(\"Apply patch: {0}\".format(patch))\n rela = os.path.relpath(topDir, srcDir)\n relb = os.path.relpath(patchDir, topDir)\n cmd = \"patch -p1 < {0}\".format(patch)\n run_cmd(cmd, srcDir)\n\ndef build_ctng(topDir, ctngSrc):\n logger.info(\"Boostrap crosstool-ng source\")\n cmd = \"./bootstrap\"\n run_cmd(cmd, ctngSrc)\n cmd_env = os.environ.copy()\n cmd_env[\"CFLAGS\"] = \"-DKBUILD_NO_NLS\"\n cmd = \"./configure --prefix={0}\".format(CTNG_BIN_DIR)\n run_cmd(cmd, ctngSrc, env=cmd_env)\n cmd = \"make\"\n run_cmd(cmd, ctngSrc, env=cmd_env)\n cmd = \"make install\"\n run_cmd(cmd, ctngSrc, env=cmd_env)\n\n\ndef fetch_ctng(topDir, tmpDir, forceRebuild):\n\n ctngSrc = tmpDir + \"/crosstool-ng\"\n\n if forceRebuild == 1:\n if os.path.exists(CTNG_BIN_DIR):\n shutil.rmtree(CTNG_BIN_DIR, ignore_errors=True)\n\n \n logger.info(\"Check if crosstool-ng has already been build...\")\n\n if not os.path.isfile(CTNG_BINARY):\n logger.info(\"Binary not found\")\n \n if os.path.exists(ctngSrc):\n shutil.rmtree(ctngSrc, ignore_errors=True)\n\n logger.info(\"Download crosstool-ng source\")\n\n cmd = \"git clone {0}\".format(CTNG_REPO)\n run_cmd(cmd, tmpDir)\n\n cmd = \"git checkout {0}\".format(CTNG_COMMIT_HASH)\n run_cmd(cmd, ctngSrc)\n\n apply_patches(topDir, ctngSrc ,topDir + \"/patches/crosstool-ng\")\n\n build_ctng(topDir, ctngSrc)\n \n\ndef build_toolchain(topDir,tmpDir, arch, opsys):\n\n buildDir = tmpDir+\"/build_\" + arch + \"_\" + opsys\n outputDir = topDir+\"/out_\" + arch + \"_\" + opsys\n cfgInputFile = topDir + \"/configs/\" + arch + \".config\"\n\n if not os.path.isfile(cfgInputFile):\n logger.error(\"Configuration for target {0} not found\".format(arch))\n sys.exit(-1)\n\n shutil.copy(cfgInputFile, tmpDir)\n cfgInputFile = tmpDir + \"/\" + arch + \".config\"\n\n if opsys == \"windows\":\n f=open(cfgInputFile, \"a+\")\n f.write(\"CT_CANADIAN=y\\n\")\n f.write(\"CT_HOST=\\\"x86_64-w64-mingw32\\\"\\n\")\n f.close()\n\n\n if os.path.exists(buildDir):\n shutil.rmtree(buildDir, ignore_errors=True)\n if os.path.exists(outputDir):\n shutil.rmtree(outputDir, ignore_errors=True)\n\n os.mkdir(buildDir)\n\n cmd = \"{0} clean\".format(CTNG_BINARY)\n run_cmd(cmd, buildDir)\n cmd = \"{0} defconfig DEFCONFIG={1}\".format(CTNG_BINARY, cfgInputFile)\n run_cmd(cmd, buildDir)\n cmd = \"{0} savedefconfig DEFCONFIG={1}.config\".format(CTNG_BINARY,arch)\n run_cmd(cmd, buildDir)\n cmd = \"{0} build.{1}\".format(CTNG_BINARY,psutil.cpu_count())\n run_cmd_ng(cmd, buildDir)\n\n\n\nif __name__ == '__main__':\n argParser = argparse.ArgumentParser(description='Open SICK AG SDK')\n optArgs = argParser._action_groups.pop()\n optArgs.add_argument('-a', '--architecture',\n help=\"select the target architecture\"\n \"(default=arm)\",\n default=\"arm\")\n optArgs.add_argument('-host', '--hostos',\n help=\"select the host operating system\"\n \"(default=linux)\",\n default=\"linux\")\n \n \n argParser._action_groups.append(optArgs)\n args = argParser.parse_args()\n cwd = os.getcwd()\n tmpDir = cwd + \"/tmp\"\n \n logger.info(\"Initialize...\")\n logger.info(\"Current directory: {0}\".format(cwd))\n\n if os.path.exists(tmpDir):\n shutil.rmtree(tmpDir, ignore_errors=True)\n os.mkdir(tmpDir)\n\n 
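# build (or reuse) the ct-ng binary, then use it to build the requested toolchain\n    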
fetch_ctng(cwd, tmpDir, 0)\n\n    build_toolchain(cwd, tmpDir, args.architecture, args.hostos)\n\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"324011293","text":"# 2. Write two algorithms that find the i-th prime number.\r\n# The prime-finding function must take a natural number as input and return the corresponding prime.\r\n# Analyze the speed and complexity of both algorithms.\r\n# The first one uses the Sieve of Eratosthenes algorithm.\r\n# Note: the Sieve of Eratosthenes was covered in an earlier lesson.\r\n# Use that code and try to improve/optimize it for this task.\r\n# The second one must work without the Sieve of Eratosthenes.\r\n\r\nimport math\r\nimport cProfile\r\nfrom timeit import timeit\r\n\r\n# We use the prime number theorem, which tells us that the k-th prime p\r\n# behaves like p ~ k*ln(k).\r\n# Multiply by 2 for safety: p = 2*k*ln(k).\r\n# For example, for k = 100 this gives a sieve bound of about 921, while the 100th prime is 541.\r\n\r\ndef Erastofen(k):\r\n    p = int(2*math.log(k)*k)\r\n    sieve = [i for i in range(p)]\r\n    sieve[1] = 0\r\n\r\n    for i in range(p):\r\n\r\n        if sieve[i] != 0:\r\n            j = i*2\r\n\r\n            while j < p:\r\n                sieve[j] = 0\r\n                j += i\r\n\r\n    primes = [i for i in sieve if i != 0]\r\n    return primes[k-1]\r\n\r\n# cProfile.run('test_1()')\r\n#\r\n# ncalls tottime percall cumtime percall filename:lineno(function)\r\n# 1 0.000 0.000 1.333 1.333 <string>:1(<module>)\r\n# 1 1.069 1.069 1.312 1.312 les_4_task_2.py:17(Erastofen)\r\n# 1 0.174 0.174 0.174 0.174 les_4_task_2.py:19(<listcomp>)\r\n# 1 0.069 0.069 0.069 0.069 les_4_task_2.py:31(<listcomp>)\r\n# 1 0.021 0.021 1.333 1.333 les_4_task_2.py:61(test_1)\r\n# 1 0.000 0.000 1.333 1.333 {built-in method builtins.exec}\r\n# 1 0.000 0.000 0.000 0.000 {built-in method math.log}\r\n# 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\r\n#\r\n\r\n# The algorithm runs in roughly O(n*ln(n)) time (in reality O(n*ln(ln(n)))),\r\n# since increasing the number of elements 10x\r\n# made the run time grow roughly 20x per run.\r\n\r\nprint(timeit('test_2()', globals={'test_2': test_2}, number=100))\r\n# n= 10\r\n# 0.0006279224909147173\r\n# n= 100\r\n# 0.034310257155875\r\n# n= 1000\r\n# 3.171250542317902\r\n# n= 10000\r\n# 359.2339087475295\r\n\r\n#cProfile.run('test_2()')\r\n#n = 100000\r\n\r\n# 100004 function calls in 324.716 seconds\r\n# Ordered by: standard name\r\n#\r\n# ncalls tottime percall cumtime percall filename:lineno(function)\r\n# 1 0.000 0.000 324.716 324.716 <string>:1(<module>)\r\n# 1 324.650 324.650 324.715 324.715 les_4_task_2.py:38(search_prime)\r\n# 1 0.001 0.001 324.716 324.716 les_4_task_2.py:65(test_2)\r\n# 1 0.000 0.000 324.716 324.716 {built-in method builtins.exec}\r\n# 99999 0.065 0.000 0.065 0.000 {method 'append' of 'list' objects}\r\n# 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\r\n\r\n# The algorithm runs in roughly O(n^2) time,\r\n# since 
increasing the number of elements 10x\r\n# made the run time grow roughly 100x per run.\r\n# However, for small n (n < 100) it is faster than the Sieve of Eratosthenes.\r\n\r\n","sub_path":"Geekbrains/phyton/домашняя работа 4/les_4_task_2.py","file_name":"les_4_task_2.py","file_ext":"py","file_size_in_byte":4925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"68671510","text":"#!/usr/bin/env python3\n\n\"\"\"\nPyAudio Example: Records 5 seconds of audio and plays it back\n\nBased on https://stackoverflow.com/questions/46768459/python-recording-and-playing-microphone-input\n\"\"\"\n\nimport pyaudio\n\nCHUNK = 1024\nWIDTH = 2\nCHANNELS = 2\nRATE = 44100\nRECORD_SECONDS = 5\n\np = pyaudio.PyAudio()\n\nstream = p.open(format=p.get_format_from_width(WIDTH),\n                channels=CHANNELS,\n                rate=RATE,\n                input=True,\n                output=True,\n                frames_per_buffer=CHUNK)\n\ndata = []\n\nprint(\"* recording started\")\nfor i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n    data.append(stream.read(CHUNK)) #read audio stream\n\nprint(\"* recording done\")\n\nprint(\"* playback started\")\nfor el in data:\n    stream.write(el) #play back audio stream\nprint(\"* playback done\")\n\nstream.stop_stream()\nstream.close()\n\np.terminate()","sub_path":"speech-recognition/microphone_test.py","file_name":"microphone_test.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"255144124","text":"'''\r\nMethods to read and manipulate Open Street Maps\r\n\r\nClasses and methods to convert OSM to NetworkX Graphs based on \r\nhttps://gist.github.com/aflaxman/287370/\r\nBased on osm.py from brianw's osmgeocode\r\nhttp://github.com/brianw/osmgeocode, which is in turn based on osm.py\r\nfrom Graphserver:\r\nhttp://github.com/bmander/graphserver/tree/master and is copyright (c)\r\n2007, Brandon Martin-Anderson under the BSD License\r\n'''\r\n\r\nimport xml.sax\r\nimport copy\r\nimport networkx\r\nimport numpy as np\r\nimport collections\r\nimport csv\r\nfrom math import radians, cos, sin, asin, sqrt, atan2, pi, degrees\r\n\r\ndef download_osm(left, bottom, right, top):\r\n    ''' Return a filehandle to the downloaded data.'''\r\n    import urllib.request\r\n    fp = urllib.request.urlopen( \"http://api.openstreetmap.org/api/0.6/map?bbox=%f,%f,%f,%f\"%(left,bottom,right,top) )\r\n    return fp\r\n\r\ndef bearing(start_lat, start_lon, end_lat, end_lon, normalize=True):\r\n    '''\r\n    Calculates bearing between two points in degrees. 
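For example, the bearing due east along the equator, from (lat 0, lon 0) to (lat 0, lon 1), is 90 degrees. 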
Will return in range -180 to 180 if normalize = False\r\n    Else will return in range of 0-360 with 0 at North and increasing clockwise\r\n    \r\n    Args\r\n    ----------\r\n    start_lat : Decimal degrees of lat\r\n    start_lon : Decimal degrees of lon\r\n    end_lat : Decimal degrees of lat\r\n    end_lon : Decimal degrees of lon\r\n    normalize : Convert bearing range from -180:180 to 0:360 range\r\n    \r\n    Returns \r\n    ----------\r\n    bearing : degrees in range 0-360 (or -180 to 180 if normalize = False)\r\n    \r\n    '''\r\n    # convert decimal degrees to radians \r\n    start_lon, start_lat, end_lon, end_lat = map(radians, [start_lon, start_lat, end_lon, end_lat])\r\n    \r\n    bearing_rads = atan2(sin(end_lon-start_lon)*cos(end_lat),cos(start_lat)*sin(end_lat)-sin(start_lat)*cos(end_lat)*cos(end_lon-start_lon))\r\n    \r\n    bearing_degs = bearing_rads * 180 / pi\r\n    \r\n    if normalize:\r\n        return (bearing_degs+360)%360\r\n    else: \r\n        return bearing_degs\r\n\r\ndef distance(start_lat, start_lon, end_lat, end_lon, units = 'Miles'):\r\n    ''' \r\n    Haversine calculation for distance \r\n\r\n    Args\r\n    ----------\r\n    start_lat : Decimal degrees of lat\r\n    start_lon : Decimal degrees of lon\r\n    end_lat : Decimal degrees of lat\r\n    end_lon : Decimal degrees of lon\r\n    units : Default Miles. Enter \"Kilometers\" to return distance in Km.\r\n    \r\n    Returns\r\n    ----------\r\n    distance : Distance in Miles (default) or Kilometers (if specified by user)\r\n    \r\n    '''\r\n    # convert decimal degrees to radians \r\n    start_lon, start_lat, end_lon, end_lat = map(radians, [start_lon, start_lat, end_lon, end_lat])\r\n\r\n    # Haversine formula \r\n    dlon = end_lon - start_lon\r\n    dlat = end_lat - start_lat\r\n    a = sin(dlat/2)**2 + cos(start_lat) * cos(end_lat) * sin(dlon/2)**2\r\n    c = 2 * asin(sqrt(a)) \r\n    \r\n    if units == 'Miles': \r\n        r = 3956 # Radius of Earth in mi\r\n    elif units == 'Kilometers':\r\n        r = 6371 # Radius of Earth in km\r\n    return c * r\r\n\r\ndef coords(start_lat, start_lon, distance, bearing, units = 'Miles'):\r\n    ''' \r\n    Haversine calculation to return end_lat, end_lon given a starting point, distance, and bearing\r\n\r\n    Args\r\n    ----------\r\n    start_lat : Decimal degrees of lat\r\n    start_lon : Decimal degrees of lon\r\n    distance : Distance from starting point in miles \r\n    bearing : Initial bearing (in degrees clockwise from N)\r\n    units : Default Miles. Enter \"Kilometers\" to return distance in Km.\r\n    \r\n    Returns\r\n    ----------\r\n    coords : [end_lat, end_lon] in degrees\r\n    \r\n    '''\r\n    start_lat, start_lon = map(radians, [start_lat, start_lon])\r\n    bearing = radians(bearing) # the formula below needs the bearing in radians as well\r\n    \r\n    if units == 'Miles': \r\n        d = distance / 3956 # Radius of Earth in mi\r\n    elif units == 'Kilometers':\r\n        d = distance / 6371 # Radius of Earth in km\r\n    \r\n    end_lat = degrees(asin(sin(start_lat)*cos(d) + cos(start_lat)*sin(d)*cos(bearing)))\r\n    end_lon = degrees(start_lon + atan2(sin(bearing)*sin(d)*cos(start_lat), cos(d)-sin(start_lat)*sin(end_lat)))\r\n    \r\n    coords = [end_lat, end_lon]\r\n    return coords\r\n\r\ndef connected_ways(way_ids, osm):\r\n    ''' \r\n    Processes an OSM object to find only CONNECTED ways. 
These ways must share at least one node with another way; the shared node represents \r\n    an intersection between the two ways.\r\n    \r\n    Args \r\n    ----------\r\n    way_ids : List of way_ids \r\n    osm : OSM object\r\n    \r\n    Returns \r\n    ----------\r\n    way_list : List of unique connected way_ids \r\n    ways_and_nodes : list of all nodes for each connected way_id\r\n    \r\n    ''' \r\n    l = []\r\n    way_list = []\r\n    ways_and_nodes = []\r\n    \r\n    # Build out array of all way_id, node_id in osm\r\n    for w in osm.ways: \r\n        if w not in way_ids:\r\n            continue\r\n        j = osm.ways[w].id\r\n        k = [i for i in osm.ways[w].nds]\r\n        for i in k: \r\n            l.append([j, i]) # build array of way_id, node_id for each way_id (contains all nodes for a given way_id)\r\n    \r\n    # Now process l to get only intersecting ways\r\n    l = np.array(l) \r\n    m = list(l[:,1]) # Extract all nodes\r\n    cnt = collections.Counter(m)\r\n    nodeList = [x for x, y in cnt.items() if y > 1] # all node_ids that occur more than once\r\n    way_list = [x for x, y in l if y in nodeList] # all way_ids that have shared nodes (may list a way_id more than once)\r\n    way_list = set(way_list) # convert to set to eliminate any duplicates\r\n    ways_and_nodes = [[x, y] for x, y in l if x in way_list] # way_id, node_id pairs for each way that has an intersection node (includes all nodes for each way)\r\n    \r\n    return way_list, ways_and_nodes\r\n    \r\ndef unconnected_ways(way_ids, osm):\r\n    ''' \r\n    Processes an OSM object to find all ways (connected and unconnected).\r\n    \r\n    Args \r\n    ----------\r\n    way_ids : List of way_ids \r\n    osm : OSM object\r\n    \r\n    Returns \r\n    ----------\r\n    way_list : List of unique way_ids \r\n    ways_and_nodes : list of all nodes for each way_id\r\n    \r\n    TODO: \r\n    - Determine how to process way_ids that are connected to buildings, etc. \r\n    \r\n    '''\r\n    ways_and_nodes = []\r\n\r\n    # Build out array of all way_id, node_id \r\n    for w in osm.ways: \r\n        if w not in way_ids:\r\n            continue\r\n        j = osm.ways[w].id\r\n        k = [i for i in osm.ways[w].nds]\r\n\r\n        for i in k: \r\n            ways_and_nodes.append([j, i]) # build array of way_id, node_id for each way_id (contains all nodes for a given way_id)\r\n    \r\n    way_ids = set(way_ids) # Convert to set to ensure no duplicates in returned list\r\n    \r\n    return way_ids, ways_and_nodes\r\n    \r\ndef osm_way_matrix(filename_or_stream, only_connected=False, only_roads=False, only_include=False, exclude=False, units='Miles'):\r\n    '''\r\n    Args\r\n    ----------\r\n    filename_or_stream : osm file\r\n    only_connected : Default False. Set True to keep only ways that have at least one intersection with another way.\r\n    only_roads : Default False. Set True to skip ways that don't have the 'highway' tag (i.e. keep only roads)\r\n    only_include : Default False. Pass a list of 'highway' values; only ways with those values are included (all others are excluded)\r\n    exclude : Default False. Pass a list of 'highway' values that should be excluded (all unlisted values are included) \r\n    units : Default Miles. Use 'Kilometers' to return way_length in Km. 
\r\n    \r\n    Returns\r\n    ----------\r\n    output_matrix : Matrix of [[way_id, way_length, start_node, end_node, tags]]\r\n    \r\n    TODO: \r\n    - Remove len calculation for non-street nodes (buildings, etc)\r\n\r\n    '''\r\n    osm = OSM(filename_or_stream)\r\n    way_ids = []\r\n    \r\n    # ----------------------------------------------------------------------------------\r\n    # Build out way_id and way_and_nodes list based on args passed to the function.\r\n    # All the filters are optional, so apply whichever were passed, then pick the\r\n    # connected or unconnected way builder.\r\n    # ---------------------------------------------------------------------------------- \r\n    \r\n    for w in osm.ways.values():\r\n        if only_roads and 'highway' not in w.tags:\r\n            continue\r\n        if only_include and w.tags.get('highway') not in only_include:\r\n            continue\r\n        if exclude and w.tags.get('highway') in exclude:\r\n            continue\r\n        way_ids.append(w.id)\r\n    \r\n    if only_connected:\r\n        way_list, ways_and_nodes = connected_ways(way_ids, osm)\r\n    else:\r\n        way_list, ways_and_nodes = unconnected_ways(way_ids, osm)\r\n    \r\n    # ----------------------------------------------------------------------------------\r\n    # Build final matrix and return output using the correct way_id and way_and_nodes lists.\r\n    # ---------------------------------------------------------------------------------- \r\n    \r\n    way_lengths = {}\r\n    output_matrix = []\r\n    p = np.array(ways_and_nodes)\r\n    \r\n    for way_id in way_list: \r\n        arr_index = np.where(p[:,0] == way_id) \r\n        length = 0\r\n        prev = p[arr_index, 1][0, 0]\r\n        \r\n        for n_id in p[arr_index, 1][0, 1:]: \r\n            length += distance(osm.nodes[prev].lat, osm.nodes[prev].lon, osm.nodes[n_id].lat, osm.nodes[n_id].lon, units)\r\n            prev = n_id\r\n        \r\n        way_lengths[way_id] = length\r\n\r\n    for i in set(p[:,0]): # convert way_id's from p to a set so that multiple way_id's aren't entered\r\n        way_id = osm.ways[i].id \r\n        start_node = osm.ways[i].nds[0]\r\n        end_node = 
osm.ways[i].nds[-1]\r\n tags = osm.ways[i].tags\r\n way_length = way_lengths.get(way_id)\r\n \r\n output_matrix.append([way_id, way_length, start_node, end_node, tags])\r\n \r\n return output_matrix\r\n\r\ndef test_utility(osm, filename):\r\n ''' \r\n Takes osm and outputs a csv of full path-node pairs for connected paths with standard 'exclude' values.\r\n Used for testing purposes. Can be modified into full function in future\r\n ''' \r\n map = osm\r\n way_ids = []\r\n exclude = ['motorway', 'trunk', 'motorway_link', 'trunk_link', 'bus_guideway', 'raceway', 'bridleway', 'cycleway', 'proposed', 'construction', 'escape','rest_area', 'services']\r\n way_ids = [w.id for w in map.ways.values() if 'highway' in w.tags and w.tags.get('highway') not in exclude]\r\n \r\n way_list, ways_and_nodes = connected_ways(way_ids, map)\r\n ways_nodes_coords = [[w, n, map.nodes[n].lat, map.nodes[n].lon] for w, n in ways_and_nodes]\r\n \r\n with open(filename, 'w', newline='') as f:\r\n writer = csv.writer(f)\r\n writer.writerows(ways_nodes_coords)\r\n\r\ndef read_osm(filename_or_stream, only_roads=True):\r\n '''\r\n Read graph in OSM format from file specified by name or by stream object.\r\n\r\n Args\r\n ---------\r\n filename_or_stream : filename or stream object\r\n\r\n Returns\r\n ---------\r\n G : Graph\r\n\r\n Examples\r\n --------\r\n >>> G=nx.read_osm(nx.download_osm(-122.33,47.60,-122.31,47.61))\r\n >>> plot([G.node[n]['data'].lat for n in G], [G.node[n]['data'].lon for n in G], ',')\r\n\r\n '''\r\n osm = OSM(filename_or_stream)\r\n G = networkx.Graph()\r\n\r\n for w in osm.ways.values():\r\n if only_roads and 'highway' not in w.tags:\r\n continue\r\n G.add_path(w.nds, id=w.id, data=w)\r\n for n_id in G.nodes_iter():\r\n n = osm.nodes[n_id]\r\n G.node[n_id] = dict(data=n)\r\n return G\r\n \r\nclass Node:\r\n def __init__(self, id, lon, lat):\r\n self.id = id\r\n self.lon = lon\r\n self.lat = lat\r\n self.tags = {}\r\n \r\nclass Way:\r\n def __init__(self, id, osm):\r\n self.osm = osm\r\n self.id = id\r\n self.nds = []\r\n self.tags = {}\r\n \r\n def split(self, dividers):\r\n # slice the node-array using this nifty recursive function\r\n def slice_array(ar, dividers):\r\n for i in range(1,len(ar)-1):\r\n if dividers[ar[i]]>1:\r\n #print \"slice at %s\"%ar[i]\r\n left = ar[:i+1]\r\n right = ar[i:]\r\n \r\n rightsliced = slice_array(right, dividers)\r\n \r\n return [left]+rightsliced\r\n return [ar]\r\n \r\n slices = slice_array(self.nds, dividers)\r\n \r\n # create a way object for each node-array slice\r\n ret = []\r\n i=0\r\n for slice in slices:\r\n littleway = copy.copy( self )\r\n littleway.id += \"-%d\"%i\r\n littleway.nds = slice\r\n ret.append( littleway )\r\n i += 1\r\n \r\n return ret\r\n \r\nclass OSM:\r\n def __init__(self, filename_or_stream):\r\n \"\"\" File can be either a filename or stream/file object.\"\"\"\r\n nodes = {}\r\n ways = {}\r\n \r\n superself = self\r\n \r\n class OSMHandler(xml.sax.ContentHandler):\r\n @classmethod\r\n def setDocumentLocator(self,loc):\r\n pass\r\n \r\n @classmethod\r\n def startDocument(self):\r\n pass\r\n \r\n @classmethod\r\n def endDocument(self):\r\n pass\r\n \r\n @classmethod\r\n def startElement(self, name, attrs):\r\n if name=='node':\r\n self.currElem = Node(attrs['id'], float(attrs['lon']), float(attrs['lat']))\r\n elif name=='way':\r\n self.currElem = Way(attrs['id'], superself)\r\n elif name=='tag':\r\n self.currElem.tags[attrs['k']] = attrs['v']\r\n elif name=='nd':\r\n self.currElem.nds.append( attrs['ref'] )\r\n \r\n @classmethod\r\n def 
endElement(self,name):\r\n            if name=='node':\r\n                nodes[self.currElem.id] = self.currElem\r\n            elif name=='way':\r\n                ways[self.currElem.id] = self.currElem\r\n            \r\n        @classmethod\r\n        def characters(self, chars):\r\n            pass\r\n\r\n        xml.sax.parse(filename_or_stream, OSMHandler)\r\n        \r\n        self.nodes = nodes\r\n        self.ways = ways\r\n        \r\n        #count times each node is used\r\n        node_histogram = dict.fromkeys( self.nodes.keys(), 0 )\r\n        #iterate over a copy: deleting from a dict while iterating it raises a RuntimeError in Python 3\r\n        for way in list(self.ways.values()):\r\n            if len(way.nds) < 2:       #if a way has only one node, delete it out of the osm collection\r\n                del self.ways[way.id]\r\n            else:\r\n                for node in way.nds:\r\n                    node_histogram[node] += 1\r\n        \r\n        #use that histogram to split all ways, replacing the member set of ways\r\n        new_ways = {}\r\n        for id, way in self.ways.items():\r\n            split_ways = way.split(node_histogram)\r\n            for split_way in split_ways:\r\n                new_ways[split_way.id] = split_way\r\n        self.ways = new_ways\r\n","sub_path":"RouteFinder/osmmethods.py","file_name":"osmmethods.py","file_ext":"py","file_size_in_byte":17594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"204880086","text":"class Stack:\n    def __init__(self):\n        self.__arr = []\n\n    def push(self, a):\n        self.__arr.append(a)\n\n    def back(self):\n        return self.__arr[-1]\n\n    def pop(self):\n        return self.__arr.pop()\n\n    def clear(self):\n        self.__arr = []\n\n    def size(self):\n        return len(self.__arr)\n\n\nstack = Stack()\n\ninput_str = input()\n\nsucceed = True\n\nfor ch in input_str:\n    if ch == '(' or ch == '[' or ch == '{':\n        stack.push(ch)\n    if ch == ')' or ch == ']' or ch == '}':\n        if stack.size() == 0:\n            succeed = False\n            break\n        if (stack.back() == '(' and ch == ')' or\n                stack.back() == '[' and ch == ']' or\n                stack.back() == '{' and ch == '}'):\n            stack.pop()\n        else:\n            succeed = False\n            break\n\nif stack.size() == 0 and succeed:\n    print('yes')\nelse:\n    print('no')\n","sub_path":"stack/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"532455205","text":"import os.path\nimport logging as log\n\nclass Blast2hist:\n\n\tdef __init__ (self, args):\n\t\tself.check_args(args)\n\t\tself.cmd = []\n\t\tself.create_cmd()\n\n\tdef create_cmd (self):\n\t\tcmd = 'blast2hist.py --out ' + self.out\n\t\tif self.iter == 'global':\n\t\t\tfor s_id in self.blast_files:\n\t\t\t\tfor i in range(0, len(self.blast_files[s_id]['id'])):\n\t\t\t\t\tcmd += ' -i ' + self.blast_files[s_id]['id'][i]\n\t\t\t\t\tcmd += ' -b ' + self.blast_files[s_id]['csv_file'][i]\n\t\tlog.debug(cmd)\n\t\tself.cmd.append(cmd)\n\n\n\tdef check_args (self, args: dict):\n\t\tself.execution = 1\n\t\tif 'out' in args:\n\t\t\tself.out = args['out']\n\t\tif 'sge' in args:\n\t\t\tself.sge = bool(args['sge'])\n\t\telse:\n\t\t\tself.sge = False\n\t\tif 'n_cpu' in args:\n\t\t\tself.n_cpu = str(args['n_cpu'])\n\t\telse:\n\t\t\tself.n_cpu = '1'\n\t\tself.wd = os.getcwd()\n\t\tself.cmd_file = self.wd + '/' + 'blast2hist_cmd.txt'\n\t\tif 'iter' in args:\n\t\t\tif args['iter'] == 'global':\n\t\t\t\tself.iter = 'global'\n\t\t\t\tself.blast_files = {}\n\t\t\t\tfor s_id in args['args']:\n\t\t\t\t\tfor i in range(1, 100, 1):\n\t\t\t\t\t\tid_name = 'id' + str(i)\n\t\t\t\t\t\topt_name = 'b' + str(i)\n\t\t\t\t\t\tif id_name not in args['args'][s_id] and opt_name not in args['args'][s_id]:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif opt_name in args['args'][s_id]:\n\t\t\t\t\t\t\tif os.path.exists(self.wd + 
'/' + s_id + '/' + args['args'][s_id][opt_name]):\n\t\t\t\t\t\t\t\tif s_id not in self.blast_files:\n\t\t\t\t\t\t\t\t\tself.blast_files[s_id] = {}\n\t\t\t\t\t\t\t\t\tself.blast_files[s_id]['csv_file'] = []\n\t\t\t\t\t\t\t\t\tself.blast_files[s_id]['id'] = []\n\t\t\t\t\t\t\t\tself.blast_files[s_id]['csv_file'].append(self.wd + '/' + s_id + '/' + args['args'][s_id][opt_name])\n\t\t\t\t\t\t\t\tself.blast_files[s_id]['id'].append(args['args'][s_id][id_name])\n\t\tif len(self.blast_files.keys()) == 0:\n\t\t\tself.execution = 0\n\n\n\tdef _check_file (self, f):\n\t\ttry:\n\t\t\topen(f).close()\n\t\t\treturn f\n\t\texcept IOError:\n\t\t\tprint('File not found ' + f)\n","sub_path":"launchers/Blast2hist.py","file_name":"Blast2hist.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"157196921","text":"#!/usr/bin/env python3\n\"\"\"Given mean accuracy values by configuration for a semi-supervised learning grid search, run a linear regression to predict accuracy based on the hyperparameter values and determine how well they are correlated\"\"\"\n# Created by Brendon Matusch, August 2018\n\nimport sys\n\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\n\n# The hyperparameters that we are concerned with using to predict accuracy\nHYPERPARAMETERS = ['dropout', 'l2_lambda', 'initial_threshold', 'threshold_multiplier']\n\n# Read all configuration lines provided to standard input (from the script that calculates mean by configuration) and strip whitespace\nlines = [line.strip() for line in sys.stdin.readlines() if 'Configuration:' in line]\n# Get the configuration identifier from each line\nconfigurations = [line.split()[1] for line in lines]\n# Create a NumPy array that will contain values of the hyperparameters\n# It is okay if not all of the configurations are unique because of disregarded hyperparameters; they can be used for training individually\nhyperparameter_values = np.empty((len(configurations), len(HYPERPARAMETERS)))\n# Iterate over the configurations with corresponding indices, extracting arrays of hyperparameters\nfor configuration_index, configuration in enumerate(configurations):\n    # Iterate over the hyperparameters with corresponding indices, extracting them one by one\n    for hyperparameter_index, hyperparameter in enumerate(HYPERPARAMETERS):\n        # Split the line by the name of the hyperparameter, and then get the hyperparameter value (as a floating-point number) up to the next slash separator\n        hyperparameter_values[configuration_index, hyperparameter_index] = float(configuration.split(hyperparameter)[1].split('/')[0])\n# Extract the accuracy value from each line\naccuracy_values = [float(line.split()[3]) for line in lines]\n# Create a linear regression model and train it to predict the accuracy values based on the raw hyperparameter values\nlinear_regression = LinearRegression()\nlinear_regression.fit(hyperparameter_values, accuracy_values)\n# Print the regressor's score on its training set to get an idea of the correlation between the hyperparameters and accuracy\nscore = linear_regression.score(hyperparameter_values, accuracy_values)\nprint('Score:', score)\n","sub_path":"training/hyperparameter_correlation_linear_regression.py","file_name":"hyperparameter_correlation_linear_regression.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"389813201","text":"#!/usr/bin/env python\n\nfrom setuptools import setup\n\n\ndef get_requirements(suffix=''):\n with open('requirements%s.txt' % suffix) as f:\n rv = f.read().splitlines()\n return rv\n\n\nsetup(\n name='iris',\n version='1.1',\n author='',\n author_email='',\n description='',\n include_package_data=True,\n zip_safe=False,\n packages=['iris'],\n install_requires=get_requirements())\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"170522695","text":"# Copyright 2020 Jacob Shtabnoy \n# This source code file is available under the terms of the ISC License.\n# If the LICENSE file was not provided, you can find the full text of the license here:\n# https://opensource.org/licenses/ISC\n\"\"\"This module provides packet parsing utilities for the Classic Protocol, and the CPE.\"\"\"\nimport asyncio\nimport math\nimport struct\n\nfrom typing import List, Tuple, Any, Optional, Type\nfrom struct import Struct\n\n\nclass DataType:\n struct: Struct = None\n\n def __init__(self):\n raise RuntimeWarning(\"You do not need to construct DataType to use it.\")\n\n @classmethod\n def unpack(cls, t: tuple) -> Any:\n return t[0]\n\n @classmethod\n def to_bytes(cls, obj: Any) -> bytes:\n return cls.struct.pack(obj)\n\n @classmethod\n def from_bytes(cls, b: bytes) -> Any:\n return cls.unpack(cls.struct.unpack(b))\n\n\nclass DataPacker:\n \"\"\"Provides utilities for packing/unpacking Python values into bytes for transmission.\"\"\"\n def __init__(self, data: bytes = b''):\n self.data = data\n \"\"\"Contains the data currently stored within the packer\"\"\"\n self.__index = 0\n\n def add(self, data_type: Type[DataType], value: Any) -> None:\n \"\"\"Appends the value to the end of `data`, encoded according to the DataType enum passed.\"\"\"\n try:\n self.data += data_type.to_bytes(value)\n except struct.error as e:\n raise ValueError(f\"Error packing {value} as {data_type}\") from e\n\n def pop(self, data_type: Type[DataType]) -> Optional[Any]:\n \"\"\"Reads the specified data_type from `data`, and then removes it from `data`. If there is no more data\n remaining, or if the data_type does not fit into the current buffer, returns None.\"\"\"\n size = data_type.struct.size\n outdex = size+self.__index\n if outdex > len(self.data):\n return None\n data = self.data[self.__index:outdex]\n self.__index = outdex\n try:\n return data_type.from_bytes(data)\n except struct.error as e:\n raise ValueError(f\"Error reading {data} as {data_type}\") from e\n\n def reset(self, new_data: bytes = b'') -> None:\n \"\"\"Replaces the current value of `data` with the `new_data` argument.\"\"\"\n self.data = new_data\n self.__index = 0\n\n\nclass Position:\n \"\"\"A representation of a point in 3d space with a yaw and pitch.\"\"\"\n def __init__(self, x=0.0, y=0.0, z=0.0, yaw=1.0, pitch=1.0):\n self.x = x\n \"\"\"X coordinate of the Position\"\"\"\n self.y = y\n \"\"\"Y coordinate of the Position\"\"\"\n self.z = z\n \"\"\"Z coordinate of the Position\"\"\"\n self.yaw = yaw\n \"\"\"Yaw (or heading) of the point in space, in degrees.\"\"\"\n self.pitch = pitch\n \"\"\"Pitch of the point in space, in degrees.\"\"\"\n\n def to_list(self, rotation: bool = False) -> list:\n \"\"\"Returns the Vector as a list in XYZ order. 
If `rotation` is true, yaw and pitch will be added to the end.\"\"\"\n        result = [self.x, self.y, self.z]\n        if rotation:\n            result += [self.yaw, self.pitch]\n        return result\n\n    def __str__(self):\n        return repr(self.to_list(True))\n\n    def __eq__(self, other):\n        return (\n            self.x == other.x and\n            self.z == other.z and\n            self.y == other.y and\n            self.yaw == other.yaw and\n            self.pitch == other.pitch\n        )\n\n    def __iter__(self):\n        return iter(self.to_list())\n\n    def __trunc__(self):\n        return Position(\n            x=math.trunc(self.x),\n            y=math.trunc(self.y),\n            z=math.trunc(self.z),\n            yaw=math.trunc((self.yaw*255)/360),\n            pitch=math.trunc((self.pitch*255)/360)\n        )\n\n    def __add__(self, other):\n        if type(other) is not Position:\n            other = Position(other, other, other)\n        return Position(\n            x=self.x+other.x,\n            y=self.y+other.y,\n            z=self.z+other.z,\n            yaw=self.yaw+other.yaw,\n            pitch=self.pitch+other.pitch\n        )\n\n    def __sub__(self, other):\n        if type(other) is not Position:\n            other = Position(other, other, other)\n        return Position(\n            x=self.x-other.x,\n            y=self.y-other.y,\n            z=self.z-other.z,\n            yaw=self.yaw-other.yaw,\n            pitch=self.pitch-other.pitch\n        )\n\n    def __mul__(self, other):\n        if type(other) is not Position:\n            other = Position(other, other, other)\n        return Position(\n            x=self.x*other.x,\n            y=self.y*other.y,\n            z=self.z*other.z,\n            yaw=self.yaw*other.yaw,\n            pitch=self.pitch*other.pitch\n        )\n\n    def __truediv__(self, other):\n        if type(other) is not Position:\n            other = Position(other, other, other)\n        return Position(\n            x=self.x/other.x,\n            y=self.y/other.y,\n            z=self.z/other.z,\n            yaw=self.yaw/other.yaw,\n            pitch=self.pitch/other.pitch\n        )\n\n\nclass Short(DataType):\n    struct = Struct(\"!h\")\n\n\nclass UnsignedByte(DataType):\n    struct = Struct(\"!B\")\n\n\nclass SignedByte(DataType):\n    struct = Struct(\"!b\")\n\n\nclass ByteArray(DataType):\n    struct = Struct(\"!1024s\")\n\n\nclass CoarseVector(DataType):\n    struct = Struct(\"!3h\")\n\n    @classmethod\n    def unpack(cls, t: tuple) -> Position:\n        return Position(*t)\n\n    @classmethod\n    def to_bytes(cls, obj: Position) -> bytes:\n        trunc_pos = Position.__trunc__(obj)\n        return cls.struct.pack(*trunc_pos.to_list())\n\n\nclass FineVector(DataType):\n    struct = Struct(\"!3h2B\")\n\n    @classmethod\n    def unpack(cls, t: tuple) -> Position:\n        pos = Position(*t) / 32\n        pos.yaw = (pos.yaw * 360) / 255\n        pos.pitch = (pos.pitch * 360) / 255\n        return pos\n\n    @classmethod\n    def to_bytes(cls, obj: Position) -> bytes:\n        pre_trunc = obj * 32\n        trunc_pos = Position.__trunc__(pre_trunc)\n        return cls.struct.pack(*trunc_pos.to_list(True))\n\n\nclass String(DataType):\n    struct = Struct(\"64s\")\n\n    @classmethod\n    def to_bytes(cls, obj: str) -> bytes:\n        return bytes(obj.ljust(64), encoding=\"ascii\")\n\n    @classmethod\n    def unpack(cls, t: tuple) -> str:\n        return str(t[0], encoding=\"ascii\").rstrip()\n\n\nclass PacketInfo:\n    \"\"\"Contains metadata on a specific packet, such as its size, and how it should be mapped to Packet attributes.\"\"\"\n    def __init__(self, packet_id: int, byte_map: List[Tuple[Type[DataType], str]]):\n        self.packet_id = packet_id\n        \"\"\"The ID of the packet\"\"\"\n        self.byte_map: List[Tuple[Type[DataType], str]] = byte_map\n        \"\"\"A list of tuples which maps data in the packet to attributes.\"\"\"\n\n    def size(self):\n        \"\"\"Return the size of the packet's data in bytes.\"\"\"\n        reg = 0\n        for entry in self.byte_map:\n            reg += entry[0].struct.size\n        return reg\n\n    def to_packet(self, **kwargs):\n        \"\"\"Returns a Packet using this PacketInfo. 
The items of kwargs will be set as the packet's attributes.\"\"\"\n        packet = Packet(self)\n        for key, value in kwargs.items():\n            setattr(packet, key, value)\n        return packet\n\n\nclass Packet:\n    \"\"\"Represents a Classic Protocol Packet, provides utilities for parsing and transmission.\"\"\"\n\n    def __init__(self, packet_info: PacketInfo):\n        self.__packet_info = packet_info\n\n    def __str__(self):\n        return f\"Packet {self.packet_id()}\"\n\n    def packet_id(self):\n        return self.__packet_info.packet_id\n\n    def from_bytes(self, data: bytes) -> None:\n        \"\"\"Parses a raw packet into this packet using the byte map.\"\"\"\n        packer = DataPacker(data)\n        for mapping in self.__packet_info.byte_map:\n            value = packer.pop(mapping[0])\n            setattr(self, mapping[1], value)\n\n    def to_bytes(self) -> bytes:\n        \"\"\"Returns the packet as bytes for transmission, including the packet ID byte.\"\"\"\n        packer = DataPacker()\n        packer.add(UnsignedByte, self.packet_id())\n        for mapping in self.__packet_info.byte_map:\n            packer.add(mapping[0], getattr(self, mapping[1]))\n        return packer.data\n","sub_path":"pyccs/protocol/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"645933466","text":"import json\n\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet, ModelViewSet\nfrom rest_framework.mixins import ListModelMixin\nfrom rest_framework.permissions import BasePermission\n\n\nfrom education.serializers import (\n    RegionReadSerializer,\n    HouseholdReadSerializer,\n    SchoolReadSerializer\n)\nfrom education.models import Region, Household, School, EducationLevel\n\n\n\nclass RegionViewSet(GenericViewSet, ListModelMixin):\n    queryset = Region.objects.all()\n    serializer_class = RegionReadSerializer\n\nclass HouseHoldPermission(BasePermission):\n\n    def has_permission(self, request, view):\n\n        return True\n    \n    def has_object_permission(self, request, view, obj):\n\n        return True\n\nclass HouseHoldViewSet(ModelViewSet):\n    queryset = Household.objects.all()\n    serializer_class = HouseholdReadSerializer\n    permission_classes = [HouseHoldPermission]\n    \n    def get_queryset(self):\n        queryset = super().get_queryset()\n        return queryset\n\n    @action(detail=False, methods=[\"GET\"], url_path=\"get-all-households\")\n    def get_all_households(\n        self,\n        request,\n        pk=None\n    ):\n        data = HouseholdReadSerializer(Household.objects.all(), many=True)\n        return Response(\n            data=data.data,\n            status=status.HTTP_200_OK\n        )\n    \n\n\n    @action(detail=False, methods=[\"POST\"], url_path=\"upload-household-csv\")\n    def add_household_record(\n        self,\n        request,\n        pk=None\n    ):\n        data = request.body.decode('utf-8')\n        households = json.loads(data)\n        for household in households:\n            new_household = Household(\n                Region=Region.objects.get(id=int(household[\"Region\"])),\n                averageeducationlevel = EducationLevel.objects.get(id=int(household[\"educationLevel\"])),\n                HouseholdIncome = household[\"householdIncome\"]\n            )\n            new_household.save()\n        \n        return Response(\n            status=status.HTTP_200_OK\n        )\n\n    \n\nclass SchoolViewSet(ModelViewSet):\n    queryset = School.objects.all()\n    serializer_class = SchoolReadSerializer\n\n    def get_queryset(self):\n        
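# pass-through override: returns the default queryset unchanged\n        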
queryset = super().get_queryset()\n        return queryset\n    \n    #def create(self, request, *args, **kwargs):\n\n    @action(detail=False, methods=[\"POST\"], url_path=\"upload-school-csv\")\n    def upload_school_csv(\n        self,\n        request,\n        pk=None\n    ):\n        data = request.body.decode('utf-8')\n        schools = json.loads(data)\n        for school in schools:\n            mySchool = {}\n            for k in school:\n                mySchool[k.replace('\\r', '').replace('\\\\r', \"\")] = school[k].replace('\\r', '').replace('\\\\r', \"\")\n            if (len(mySchool) == 5): # skip incomplete rows: only records with all five fields are saved\n                subjectProv = mySchool[\"subjectsProvided\"].split(\" \")\n                subjectProv = \"\".join(subjectProv)\n                new_school = School(\n                    Region=Region.objects.get(id=int(mySchool[\"Region\"])),\n                    subjectProvided=subjectProv,\n                    numberofTeachers=int(mySchool[\"numberofTeachers\"]),\n                    attendanceRate=float(mySchool[\"attendanceRate\"]),\n                    dropoutRate=float(mySchool['dropoutRate'])\n                )\n                new_school.save()\n        return Response(\n            status=status.HTTP_200_OK\n        )\n","sub_path":"education/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"112358558","text":"\"\"\"\nJune 23 Challenge - Count Complete Tree Nodes\nGiven a complete binary tree, count the number of nodes.\n\nNote:\n\nDefinition of a complete binary tree from Wikipedia:\nIn a complete binary tree every level, except possibly the last, is completely filled, and all nodes in the last level\n are as far left as possible. It can have between 1 and 2^h nodes inclusive at the last level h.\n\nExample:\n\nInput:\n    1\n   / \\\n  2   3\n / \\  /\n4  5 6\n\nOutput: 6\n\"\"\"\n\n\ndef countNodes(root):\n    \"\"\"\n    :type root: TreeNode\n    :rtype: int\n    \"\"\"\n    count = 0\n    if root:\n        count += 1\n        if root.left:\n            count += countNodes(root.left)\n        if root.right:\n            count += countNodes(root.right)\n\n    return count","sub_path":"JuneChallenge/CountCompleteTreeNodes.py","file_name":"CountCompleteTreeNodes.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"327289807","text":"\"\"\"\nraven.contrib.flask\n~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom flask import request\nfrom flask.signals import got_request_exception\nfrom raven.base import Client\n\nclass Sentry(object):\n\n    def __init__(self, app=None, client=None, client_cls=Client):\n        self.app = app\n        self.client = client\n        self.client_cls = client_cls\n\n        if app:\n            self.init_app(app)\n\n    def handle_exception(self, client):\n        def _handle_exception(sender, **kwargs):\n            client.create_from_exception(\n                exc_info=kwargs.get('exc_info'),\n                url=request.url,\n                data={\n                    'META': request.environ,\n                    'GET': request.args,\n                    'POST': request.form,\n                    'app': sender.name,\n                },\n            )\n        return _handle_exception\n\n    def init_app(self, app):\n        if not self.client:\n            if not app.config.get('SENTRY_SERVERS'):\n                raise TypeError('The SENTRY_SERVERS config variable is required.')\n            client = self.client_cls(\n                include_paths=app.config.get('SENTRY_INCLUDE_PATHS'),\n                exclude_paths=app.config.get('SENTRY_EXCLUDE_PATHS'),\n                servers=app.config.get('SENTRY_SERVERS'),\n                name=app.config.get('SENTRY_NAME'),\n                key=app.config.get('SENTRY_KEY'),\n            )\n        else:\n            client = self.client\n\n        
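# subscribe to Flask's got_request_exception signal so unhandled exceptions are captured and sent to Sentry\n        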
got_request_exception.connect(self.handle_exception(client), sender=app, weak=False)\n","sub_path":"raven/contrib/flask/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"66493569","text":"'''\nevaluate the prediction.\n'''\nimport numpy as np\nfrom sklearn.metrics import average_precision_score\n\n\ndef eval_pred(prob, gt, mask=None):\n    result = {}\n    if mask is not None:\n        gt = gt[:, mask]\n        prob = prob[:, mask]\n    gt = gt.astype(int)\n    predict = (prob > .5).astype(int)\n    result['aps'] = average_precision_score(gt, prob, average=None)\n\n    result['accuracy'] = (predict == gt).astype(int).sum(axis=0) / gt.shape[0]\n    result['tp'] = np.logical_and(predict == 1, gt == 1).astype(int).sum(axis=0) / gt.shape[0]\n    result['fp'] = np.logical_and(predict == 1, gt == 0).astype(int).sum(axis=0) / gt.shape[0]\n    result['tn'] = np.logical_and(predict == 0, gt == 0).astype(int).sum(axis=0) / gt.shape[0]\n    result['fn'] = np.logical_and(predict == 0, gt == 1).astype(int).sum(axis=0) / gt.shape[0]\n\n    print('#positive = ')\n    print(gt.sum(axis=0))\n    print('Accuracy =')\n    print(result['accuracy'])\n    print('TP = ')\n    print(result['tp'])\n    print('FP = ')\n    print(result['fp'])\n    print('TN = ')\n    print(result['tn'])\n    print('FN = ')\n    print(result['fn'])\n    print('APs =')\n    print(result['aps'])\n    return result\n\n\nif __name__ == '__main__':\n    gt = np.load('gt.npy')\n    prob = np.load('pred.npy')\n    eval_pred(prob, gt)","sub_path":"dtyu/xray_learning/eval_pred.py","file_name":"eval_pred.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"565450274","text":"from django import forms\nfrom .models import bag\nclass bagform(forms.ModelForm):\n\tclass Meta:\n\t\tmodel=bag\n\t\texclude = ('gmail','pid',)\n\t\tlabels = {\n\t\t'chestsize' : \"Chest Size\",\n\t\t'waistsize':\"Waist Size\",\n\t\t'sleevelength':\"Sleeve length\",\n\t\t'shoulderlength':\"Shoulder length\",\n\t\t'neckdepth':\"Neck Depth\"\n\t\t}","sub_path":"bag/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"359614680","text":"import variables as TFEvariables\nimport constraints as TFEconstraints\nimport timetable as TFEtimetable\nimport data.colors as colors\nimport time\nimport docplex.cp.model as cp\n\n\"\"\"\nThis script represents the configuration in section 7.1.1.\nIt is divided into two parts: \n    - SETUP MODEL = All constraints used in the model. To reproduce the section 7.1.1 results, do not change them. \n      Further details are available in the constraints.py module\n    - SOLVING AND RESULTS = After solving, results can be displayed or saved. (Un)comment lines to get what you need.\n\"\"\"\n\n################# SETUP MODEL #################\nt = time.localtime()\ncurrent_time = time.strftime(\"%H:%M:%S\", t)\nprint(\"Beginning : \", current_time)\n\nprint(\"Building model : ...\")\nbegin = time.time()\nmodel = cp.CpoModel()\n\n\"\"\"\n\"constants\" is a dict with all parameters used. 
Section 7.1.1 results are obtained with the parameter values shown in parentheses: \n    - weeks (12) = number of real weeks\n    - days (5) = number of days per week\n    - slots (4) = number of slots per day\n    - segmentSize (1) = size of a segment\n    - roundUp (True) = when converting weeks into segments, the number of lessons is rounded up (True) or rounded down (False)\n    - cursus (all True) = dict of cursus included in the model\n    - quadri (\"Q1\") = quadrimester\n    - fileDataset (\"datasetBase.xlsx\") = file name of the dataset. Must be placed in the /data folder\n    - folderResults (\"12SegmentsBase\") = folder name where the results will be stored. Must be placed in the /results folder\n    - gap (16) = maximum gap between multiplied lessons (used in constraint \"maxGapBetweenMultipliedVariables\")\n    - groupAuto (True) = boolean indicating whether the divisions are generated automatically based on the number of students\n\"\"\"\nconstants = {\n    \"weeks\":12,\n    \"days\":5,\n    \"slots\":4,\n    \"segmentSize\": 1,\n    \"roundUp\": True,\n    \"cursus\": {\n        \"BA1\": True,\n        \"BA2\": True,\n        \"BA3_CHIM\": True,\n        \"BA3_ELEC\": True,\n        \"BA3_IG\": True,\n        \"BA3_MECA\": True,\n        \"BA3_MIN\": True\n    },\n    \"quadri\": \"Q1\",\n    \"fileDataset\": \"datasetBase.xlsx\",\n    \"folderResults\": \"12SegmentsBase\",\n    \"gap\": 16,\n    \"groupAuto\": True\n}\n\n\"\"\"\nGenerates variables and places them in the appropriate dicts for later use:\n    - lecturesDict = (dict) all lecture interval variables divided by AA\n    - exercisesDict = (dict) all exercise interval variables divided by AA\n    - tpsDict = (dict) all TP interval variables divided by AA\n    - projetsDict = (dict) all project interval variables divided by AA\n    - groupsIntervalVariables = (dict) all interval variables followed by group\n    - teachersIntervalVariables = (dict) all interval variables taught by teacher\n    - roomsIntervalVariables = (dict) all interval variables occupied by room\n    - cursusGroups = (CursusGroups) object dealing with group data\n    - AAset = (set) all AA encountered during the model building\n\"\"\"\nlecturesDict, exercisesDict, tpsDict, projectsDict, \\\ngroupsIntervalVariables, teachersIntervalVariables, roomsIntervalVariables, \\\ncursusGroups, AAset = TFEvariables.generateIntervalVariables(constants)\n\n# constraint 6.3.1\nTFEconstraints.longIntervalVariablesIntegrity(model, tpsDict, constants)\nTFEconstraints.longIntervalVariablesIntegrity(model, projectsDict, constants)\n\n# constraint 6.3.2\nTFEconstraints.notOverlappingConstraint(model, groupsIntervalVariables)\nTFEconstraints.notOverlappingConstraint(model, teachersIntervalVariables)\nTFEconstraints.notOverlappingConstraint(model, roomsIntervalVariables)\n\n# constraint 6.3.3\nTFEconstraints.maxGapBetweenMultipliedVariables(model, exercisesDict, constants)\nTFEconstraints.maxGapBetweenMultipliedVariables(model, tpsDict, constants)\n\n# constraint 6.3.4\nTFEconstraints.cursusUnavailabilityConstraint(model, cursusGroups, groupsIntervalVariables, constants)\nTFEconstraints.teachersUnavailabilityConstraint(model, teachersIntervalVariables, constants)\n\n# constraint 6.3.5\nTFEconstraints.segmentBoundsConstraint(model, lecturesDict, constants)\nTFEconstraints.segmentBoundsConstraint(model, exercisesDict, constants)\nTFEconstraints.segmentBoundsConstraint(model, tpsDict, constants)\nTFEconstraints.segmentBoundsConstraint(model, projectsDict, constants)\n\n# constraint 6.3.7\nTFEconstraints.orderingIntervalVariablesConstraint(model, lecturesDict)\nTFEconstraints.orderingIntervalVariablesConstraint(model, 
exercisesDict)\nTFEconstraints.orderingIntervalVariablesConstraint(model, tpsDict)\nTFEconstraints.orderingIntervalVariablesConstraint(model, projectsDict)\n\nprint(time.time()-begin)\nmodel.write_information()\n################# SETUP MODEL #################\n\n################# SOLVING AND RESULTS #################\nsolution = model.solve()\n\n# \"if solution\" is True if there is at least one solution\nif solution:\n    print(\"Saving/displaying solutions : ...\")\n    begin = time.time()\n\n    # (Un)comment this line to print the values of each interval variable\n    # solution.write()\n\n    # (Un)comment these lines to save (in the constants[\"folderResults\"] folder) and/or display timetables\n    # TFEtimetable.generateAndSaveTimetables(solution, groupsIntervalVariables, teachersIntervalVariables, roomsIntervalVariables, constants, colors.COLORS)\n    # TFEtimetable.generateAndSaveTimetables(solution, teachersIntervalVariables, groupsIntervalVariables, roomsIntervalVariables, constants, colors.COLORS)\n    # TFEtimetable.generateAndSaveTimetables(solution, roomsIntervalVariables, teachersIntervalVariables, groupsIntervalVariables, constants, colors.COLORS)\n    # TFEtimetable.generateAndDisplayTimetable(solution, groupsIntervalVariables, teachersIntervalVariables, roomsIntervalVariables, \"BA1_A\", constants, colors.COLORS)\n    # TFEtimetable.generateAndDisplayTimetable(solution, groupsIntervalVariables, teachersIntervalVariables, roomsIntervalVariables, \"BA1_B\", constants, colors.COLORS)\n    # TFEtimetable.generateAndDisplayTimetable(solution, roomsIntervalVariables, teachersIntervalVariables, groupsIntervalVariables, \"Ho.12\", constants, colors.COLORS)\n    # TFEtimetable.generateAndDisplayTimetable(solution, teachersIntervalVariables, groupsIntervalVariables, roomsIntervalVariables, \"Vandaele A\", constants, colors.COLORS)\n\n    print(time.time() - begin)\n\n# the model is infeasible: CP Optimizer spends up to 60 seconds (see cpo_config.py) trying to identify the cause of the infeasibility\nelse:\n    print(\"No solution. Conflict refiner\")\n    conflicts = model.refine_conflict()\n    conflicts.write()\n################# SOLVING AND RESULTS #################","sub_path":"model/runModelBase.py","file_name":"runModelBase.py","file_ext":"py","file_size_in_byte":6554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"429350577","text":"\"\"\"\n\tChapter 12\n\t\n\"\"\"\n\nimport pygame\n\n# defining colors as global constants\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\n\ndef main():\n\t\"\"\" Main function for the game. 
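Sets up the window and media, then runs the main loop: handle events, play the laser sound on clicks, draw the background and the player's ship at the mouse position, and tick the clock at 20 FPS. 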
\"\"\"\n\tpygame.init()\n\t\n\t# set width, height of screen\n\tsize = [800, 600]\n\tscreen = pygame.display.set_mode(size)\n\t\n\tpygame.display.set_caption(\"Game of Badassdom\")\n\t\n\t# loop until user clicks close button\n\tdone = False\n\t\n\tclock = pygame.time.Clock()\n\n\t# load the background image\n\tbackground_image = pygame.image.load(\"infinite-connection.jpg\").convert()\n\n\t# load laser sound\n\tclick_sound = pygame.mixer.Sound(\"laser5.ogg\")\n\n\t# --- Main Program Loop ---\n\twhile not done:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tdone = True\n\t\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tclick_sound.play()\n\t\t\t\t\n\t\tscreen.fill(WHITE)\n\t\t\n\t\t# draw the background\n\t\tscreen.blit(background_image, [0, 0])\n\n\t\t# draw the player's ship\n\t\tplayer_image = pygame.image.load(\"player.png\").convert()\n\t\t# set the color black to transparent\n\t\tplayer_image.set_colorkey(BLACK)\n\n\t\t# get current mouse position. returns pos\n\t\t# as a list of two numbers\n\t\tplayer_position = pygame.mouse.get_pos()\n\t\tx = player_position[0]\n\t\ty = player_position[1]\n\n\t\t# copy image to screen\n\t\tscreen.blit(player_image, [x, y])\n\t\t\n\t\tpygame.display.flip()\n\t\t\n\t\tclock.tick(20)\n\t\t\n\tpygame.quit()\n\t\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"ch_12/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"411014800","text":"from pylab import *\n\ndata = array([[1, 1], [1, 0], [0.4, 0.8], [2, 5], [2, 4], [2.2, 4.6]])\ntarget = array([0, 0, 0, 1, 1, 1])\ncolors = array([\"blue\", \"red\"])\n\np_pred = array([1.75, 2])\n\nprint(norm(data - p_pred, axis=1))\n\n\ndef distance(p0, P1):\n M = (p0 - P1) ** 2\n return sqrt(sum(M, axis=1))\n\n\nd = distance(p_pred, data)\n\n\nidx = argsort(d)\ns_targ = target[idx]\nn = 3\ns_targ = s_targ[:n]\n\nvote = bincount(s_targ)\npred = argmax(vote)\n","sub_path":"lectures/13_scikit_learn/scripts/blank.py","file_name":"blank.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"218701553","text":"\nfrom pandas import DataFrame\nimport numpy as np\nfrom collections import OrderedDict\n\n\ndef matriz_adyacencia(graph):\n vertices = graph.keys()\n matriz = []\n for i in range(len(graph.keys())): ## matriz se llena de 0's\n matriz.append([])\n for j in range(len(graph.keys())):\n matriz[i].append(0)\n newGraph = OrderedDict(graph)\n for i in vertices:\n for key,value in graph[i].items():\n if (type(value) is set): ## verifica si es que hay un diccionario en los valores del grafo (indicando mas de una adyacencia al mismo vertice)\n for e in range(len(value)):\n nodo1 = list(newGraph.keys()).index(i)\n nodo2 = list(newGraph.keys()).index(key)\n matriz[nodo1][nodo2]+=1\n else:\n nodo1 = list(newGraph.keys()).index(i)\n nodo2 = list(newGraph.keys()).index(key)\n matriz[nodo1][nodo2]+=1\n print('La matriz de adyacencia asociada al grafo es la siguiente: ')\n print(DataFrame(matriz))\n return matriz\n\ndef caminosLargoN(graph):\n matriz = matriz_adyacencia(graph)\n nVeces = int(input('Ingrese el largo del camino: '))\n matrizCopiada = np.array(matriz)\n matrizMultiplicada = np.array(matriz)\n if (nVeces>1):\n for e in range(nVeces-1):\n matrizMultiplicada = np.dot(matrizMultiplicada,matrizCopiada)\n elif (nVeces==1):\n print('La matriz de adyacencia asociada 
for computing paths of length '+ str(nVeces) +' is:')\n        print(DataFrame(matrizCopiada))\n    elif (nVeces==0):\n        matrizMultiplicada = np.identity(len(graph.keys()),dtype=int)\n        print('The adjacency matrix for computing paths of length '+ str(nVeces) +' is:')\n        print(DataFrame(matrizMultiplicada)) ## prints the identity matrix (but it should count the paths of length 0)\n    suma=0\n    for i in range(len(graph.keys())): ## add up all entries of the path matrix\n        for j in range(len(graph.keys())): \n            suma += matrizMultiplicada[i][j]\n    print('The matrix of paths of length {} is: '.format(nVeces))\n    print(DataFrame(matrizMultiplicada))\n\ndef matrizElevada(graph,matrizCopiada,each,matriz):\n    matrizMultiplicada = np.array(matriz) ## start from the adjacency matrix; after the loop this is A^(each+1)\n    for i in range(each):\n        matrizMultiplicada = np.dot(matrizCopiada,matrizMultiplicada)\n    return matrizMultiplicada\n\ndef Conexo (graph):\r\n\r\n    ## Adjacency matrix\r\n    vertices = graph.keys()\r\n    matriz = []\r\n    for i in range(len(graph.keys())): ## fill the matrix with 0's\r\n        matriz.append([])\r\n        for j in range(len(graph.keys())):\r\n            matriz[i].append(0)\r\n    newGraph = OrderedDict(graph)\r\n    for i in vertices:\r\n        for key,value in graph[i].items():\r\n            nodo1 = list(newGraph.keys()).index(i)\r\n            nodo2 = list(newGraph.keys()).index(key)\r\n            matriz[nodo1][nodo2]=1\r\n            matriz[nodo2][nodo1]=1\r\n    vertices = len(graph.keys()) ## number of vertices of the graph\r\n    matrizCopiada = np.array(matriz) ## get the adjacency matrix\r\n    matrizSumada = np.zeros(shape=(vertices,vertices),dtype=int)\r\n    ## Handle the single-vertex case\r\n    if (vertices==1):\r\n        return matrizCopiada\r\n    for e in range(vertices-1):\r\n        matrizSumada += matrizElevada(graph,matrizCopiada,e,matriz)\r\n\r\n    for i in range(vertices):\r\n        for j in range(vertices):\r\n            if (i!=j):\r\n                if (matrizSumada[i][j]==0): ## check the summed power matrix: a zero off-diagonal entry means no path between i and j\r\n                    return 0\r\n    return 1\r\n\r\n# graph = { # THE GRAPH MUST LOOK LIKE THIS\r\n#     'A':{\r\n#         'B':10\r\n#     },\r\n#     'B':{\r\n#         'C':2,\r\n#         'D':2\r\n#     },\r\n#     'C':{\r\n#         'B':4,\r\n#         'D':8,\r\n#         'E':2\r\n#     },\r\n#     'D':{\r\n#         'E':7\r\n#     },\r\n#     'E':{\r\n#         'D':9\r\n#     }\r\n# }\r\n\r\n# graph = {\r\n#     'A': {\r\n#         'B':2\r\n#     },\r\n#     'B': {\r\n#         'C':0\r\n#     },\r\n#     'C':{\r\n#         'A':2\r\n#     }\r\n# }\r\n# caminosLargoN(graph)\r\n# graph = { # THE GRAPH MUST LOOK LIKE THIS\r\n#     'A':{\r\n#         # 'B':10, \r\n#         'B':3,\r\n#         'D':4\r\n#     },\r\n#     'B':{\r\n#         'A':2,\r\n#         'C':2,\r\n#         'D':2\r\n#     },\r\n#     'C':{\r\n#         'B':4,\r\n#     },\r\n#     'D':{\r\n#         'A':2,\r\n#         'B':4\r\n#     }\r\n# }\r\n\r\n# matriz = matriz_adyacencia(graph)\r\n# print(Conexo(graph))\r\n\r\n\r\n    # if (nVeces>1):\r\n    #     for e in range(nVeces-1):\r\n    #         matrizMultiplicada = np.dot(matrizMultiplicada,matrizCopiada)\r\n    # elif (nVeces==1):\r\n    #     print('The adjacency matrix for computing paths of length '+ str(nVeces) +' is:')\r\n    #     print(DataFrame(matrizMultiplicada))\r\n    # elif (nVeces==0):\r\n    #     matrizMultiplicada = np.identity(len(graph.keys()),dtype=int)\r\n    #     print('The adjacency matrix for computing paths of length '+ str(nVeces) +' is:')\r\n    #     print(DataFrame(matrizMultiplicada)) ## prints the identity matrix (but it should count the paths of length 0)\r\n    # suma=0\r\n    # for i in range(len(graph.keys())): ## add up all entries\r\n    #     for j in range(len(graph.keys())): \r\n    #         suma += matrizMultiplicada[i][j]\r\n    # print('The number of paths of length '+ str(nVeces)+' is: ' + str(suma))\r\n    # return \r\n# graph = { # THE GRAPH MUST LOOK LIKE THIS\r\n#     'a':{\r\n#         'b':10, \r\n#         'c':{3,4}\r\n#     },\r\n#     'b':{\r\n#         'c':1,\r\n#         'd':2\r\n#     },\r\n#     'c':{\r\n#         'b':4,\r\n#         'd':8,\r\n#         'e':2\r\n#     },\r\n#     'd':{\r\n#         'e':7\r\n#     },\r\n#     'e':{\r\n#         'd':9\r\n#     }\r\n# }\r\n\r\n\r\n\r\n# 
https://matrixcalc.org/es/#%7B%7B0,1,2,0,0%7D,%7B0,0,1,1,0%7D,%7B0,1,0,1,1%7D,%7B0,0,0,0,1%7D,%7B0,0,0,1,0%7D%7D%5E2","sub_path":"matriz.py","file_name":"matriz.py","file_ext":"py","file_size_in_byte":5832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"238626888","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom wordcloud import WordCloud\r\nimport nltk\r\nfrom nltk.tokenize import word_tokenize\r\nimport pickle\r\n\r\n# Download 'punkt' by uncommenting and executing the command below \r\n#nltk.download('punkt')\r\n\r\n\r\ndef words_plot( text , MAX = 70 , minsize = 4 ):\r\n\r\n infile = open(\"stopsetwords\",'rb')\r\n stopset = pickle.load(infile)\r\n\r\n tokens_without_sw = [word for word in word_tokenize(text) if not word in stopset]\r\n\r\n wordcount = {}\r\n for a in tokens_without_sw:\r\n if a in wordcount:\r\n wordcount[a] +=1\r\n else:\r\n wordcount[a] = 1\r\n\r\n wc = WordCloud(background_color = 'white',max_words= MAX , min_font_size= minsize)\r\n wc.generate_from_frequencies(frequencies=wordcount)\r\n fig = plt.figure()\r\n plt.imshow(wc, interpolation=\"bilinear\")\r\n plt.axis(\"off\")\r\n\r\n #plt.savefig('test.png')\r\n\r\n return fig\r\n \r\n\r\nif __name__ == \"__main__\":\r\n text = \"q w s e s a w q d t w d r t d g f d g v e g e fdf sc sfg tyjh yuk ui hn gbt\"\r\n fig = words_plot(text, 3)\r\n \r\n \r\n\r\n","sub_path":"wrdcld.py","file_name":"wrdcld.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"348442315","text":"from __future__ import print_function\nimport sys\nsys.path.insert(0, '/media/sidk/Data/sidk/Research/OP3/')\nimport argparse\nimport torch\nimport torch.utils.data\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom torchvision import datasets, transforms\nfrom torchvision.utils import save_image\nimport h5py\nimport numpy as np\nfrom livelossplot import PlotLosses\nfrom rlkit.torch.vae.vae_base import GaussianLatentVAE\n\n\nnp.random.seed(0)\n\n\nparser = argparse.ArgumentParser(description='VAE MNIST Example')\nparser.add_argument('--batch-size', type=int, default=128, metavar='N',\n help='input batch size for training (default: 128)')\nparser.add_argument('--epochs', type=int, default=200, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='enables CUDA training')\nparser.add_argument('--debug', default=False,\n help='debug flag')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=100, metavar='N',\n help='how many batches to wait before logging training status')\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\ntorch.manual_seed(1)\n\ndevice = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\ndef load_dataset(data_path):\n hdf5_file = h5py.File(data_path, 'r') # RV: Data file\n\n return hdf5_file\n\n\ndef visualize_parameters(model):\n for n, p in model.named_parameters():\n if p.grad is None:\n print('{}\\t{}\\t{}'.format(n, p.data.norm(), None))\n else:\n print('{}\\t{}\\t{}'.format(n, p.data.norm(), p.grad.data.norm()))\n\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\n\n\ntrain_path = '/home/sidk/Module-Transform/dataholder'\ntest_path = '/home/sidk/Module-Transform/dataholder'\n\ndataloader = 
load_dataset(train_path)\ntrain_data_size = 9000\ntest_data_size=1000\n\ndef get_batch( train, batch_size):\n if train:\n ind = np.random.randint(0,train_data_size-batch_size)\n samples = dataloader['/train/0/[]/input'][ind:ind+batch_size]\n else:\n ind = np.random.randint(0, test_data_size-batch_size)\n samples = dataloader['/test/0/[]/input'][ind:ind+batch_size]\n return torch.Tensor(samples)\n\nclass VAE(GaussianLatentVAE):\n def __init__(self, representation_size=0):\n #super(VAE, self).__init__()\n super().__init__(representation_size)\n\n self.fc1 = nn.Linear(4, 64)\n self.fc21 = nn.Linear(64, 32)\n self.fc22 = nn.Linear(64, 32)\n self.fc3 = nn.Linear(32, 64)\n self.fc4 = nn.Linear(64, 40)\n\n visualize_parameters(self)\n def encode(self, x):\n h1 = F.relu(self.fc1(x))\n return self.fc21(h1), self.fc22(h1)\n\n #def reparameterize(self, mu, logvar):\n # std = torch.exp(0.5*logvar)\n # eps = torch.randn_like(std)\n # return mu + eps*std\n\n def decode(self, z):\n h3 = F.relu(self.fc3(z))\n mid2 = self.fc4(h3).view(args.batch_size*4,10)\n return F.softmax(mid2, dim=-1)\n\n def forward(self, x):\n mu, logvar = self.encode(x)\n z = self.reparameterize((mu, logvar))\n return self.decode(z), mu, logvar\n\n def logprob(self, inputs, obs_distribution_params):\n pass\n\n\n\n\n\n\n\n# Reconstruction + KL divergence losses summed over all elements and batch\ndef loss_function(recon_x, x, mu, logvar, beta=1):\n #print(x.shape, \" \", recon_x.shape)\n #BCE = F.nll_loss(recon_x.view(args.batch_size*4 , 10), x.view(args.batch_size*4).long(), reduction='sum')\n BCE = F.cross_entropy(recon_x, x.view(args.batch_size*4).long(), reduction='sum')\n\n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n KLD = beta*(-0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()))\n\n return BCE + KLD, BCE, KLD\n\n\n\ndef train(epoch, plotter, beta=1, prefix=''):\n model.train()\n train_loss = 0\n train_acc=0\n count=0\n logs = {}#{'total loss': [], 'kl loss':[] , 'crossentropy loss': []}\n for i in range(2):#int(train_data_size/args.batch_size)): #batch_idx, (data, _) in enumerate(train_loader):\n data = get_batch(True, args.batch_size).to(device)\n #print(data, \" , \")\n #if i % 15 == 0:\n # visualize_parameters(model)\n print(\"Parameters at Start: \")\n visualize_parameters(model)\n\n optimizer.zero_grad()\n recon_batch, mu, logvar = model(data)\n loss, crossentr_loss, kl_loss = loss_function(recon_batch, data, mu, logvar, beta)\n print(\"Parameters after forward pass: \")\n visualize_parameters(model)\n\n loss.backward()\n train_loss += loss.item()\n print(\"Parameters after loss backward: \")\n visualize_parameters(model)\n\n optimizer.step()\n print(\"Parameters after optimizer step: \")\n visualize_parameters(model)\n\n extractval = recon_batch.view(args.batch_size, 4, 10)\n _, idx = torch.max(extractval, -1)\n idx = idx.squeeze().type(torch.FloatTensor)\n #output = output.squeeze().type(torch.LongTensor)\n\n for j in range(idx.shape[0]):\n count+=1\n if (torch.equal(idx[j].to(device), data[j].to(device))):\n train_acc += 1\n\n\n logs['total loss'] = loss.item()/args.batch_size\n logs['kl loss'] = (1/beta)*(kl_loss.item()/args.batch_size)\n logs['crossentropy loss'] = crossentr_loss.item()/args.batch_size\n logs['train accuracy loss with beta: '+ str(beta)] = train_acc / count\n plotter.update(logs)\n #liveloss.draw()\n print(\"COUNT: \", count, \"TRAIN DATA SIZE: \", 
train_data_size)\n\n #print('This is output: ', idx, ' This is yval: ', data)\n print(\"Training Accuracy: \", train_acc/count)\n #print(\"shape of odx : \", idx.shape, \"shape of data: \", data.shape)\n\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}, KLE Loss: {:.6f}, Cross entropy Loss {:.6f}'.format(\n epoch, i * args.batch_size, train_data_size,\n 100. * i / train_data_size,\n loss.item() / args.batch_size, kl_loss.item() / args.batch_size,\n crossentr_loss.item() / args.batch_size))\n\n print('====> Epoch: {} Average loss: {:.4f}'.format(\n epoch, train_loss / train_data_size))\n\n\ndef test(epoch, plotter=None):\n model.eval()\n test_loss = 0\n test_acc = 0\n logs={}\n count=0\n with torch.no_grad():\n for i in range(int(test_data_size/args.batch_size)): #for i, (data, _) in enumerate(test_loader):\n data = get_batch(False, args.batch_size).to(device)\n #print(\"TEST:\", data.shape)\n recon_batch, mu, logvar = model(data)\n test_loss_temp, crossentropy_loss, kl_loss = loss_function(recon_batch, data, mu, logvar)\n test_loss+= test_loss_temp\n\n extractval = recon_batch.view(args.batch_size, 4, 10)\n _, idx = torch.max(extractval, -1)\n idx = idx.squeeze().type(torch.FloatTensor)\n # output = output.squeeze().type(torch.LongTensor)\n\n for j in range(idx.shape[0]):\n count+=1\n if (torch.equal(idx[j].to(device), data[j].to(device))):\n test_acc += 1\n logs['test accuracy loss with beta: ' + str(beta)] = test_acc / count\n plotter.update(logs)\n #if i == 0:\n # n = min(data.size(0), 8)\n #comparison = torch.cat([data[:n],\n # recon_batch.view(args.batch_size, 1, 28, 28)[:n]])\n #save_image(comparison.cpu(),\n # 'results/reconstruction_' + str(epoch) + '.png', nrow=n)\n\n test_loss /= test_data_size\n print('====> Test set loss: {:.4f} , Cross Entropy: {:.4f}, KL: {:.4f}'.format(test_loss.item(), crossentropy_loss.item(), kl_loss.item()) )\n\nif __name__ == \"__main__\":\n\n for i in range(2,3):\n traindata = PlotLosses(fig_path='data/train with beta: '+str(i)+'.png')\n testdata = PlotLosses(fig_path='data/test with beta: '+str(i)+'.png')\n np.random.seed(0)\n torch.manual_seed(1)\n model = VAE().to(device)\n optimizer = optim.Adam(model.parameters(), lr=1e-3)\n beta = pow(10,-i)\n print(beta)\n num_epochs =args.epochs\n if args.debug:\n num_epochs=1\n for epoch in range(1, num_epochs + 1):\n print( \"Epoch: \", str(epoch))\n train(epoch, traindata, beta, prefix=str(i)+' ')\n test(epoch, plotter=testdata)\n traindata.draw()\n testdata.draw()\n\n #with torch.no_grad():\n # sample = torch.randn(64, 32).to(device)\n # sample = model.decode(sample).cpu()\n # save_image(sample.view(64, 1, 28, 28),\n # 'results/sample_' + str(epoch) + '.png')","sub_path":"examples/monet/transitionmain.py","file_name":"transitionmain.py","file_ext":"py","file_size_in_byte":9130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"444931840","text":"class Solution(object):\n def singleNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n numdict={}\n for i in nums:\n if i not in numdict:\n numdict[i]=1\n else:\n del numdict[i]\n for i in numdict:\n return i","sub_path":"code/Single_Number.py","file_name":"Single_Number.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"126635330","text":"import os\nimport sys\nimport shutil\n\n#_________________________________________________________________________\ndef 
copyLibraryFromOpenSSLDirectory(openssl_directory, is_64: bool):\n files = []\n if is_64:\n build_dir = os.path.join(openssl_directory, \"build\\\\Win64\\\\VC12\\\\\")\n else:\n build_dir = os.path.join(openssl_directory, \"build\\\\Win32\\\\VC12\\\\\")\n\n print(build_dir)\n\n if os.path.exists(build_dir):\n files.append(os.path.join(build_dir, \"DLL Release\\\\openssl.exe\"))\n files.append(os.path.join(build_dir, \"DLL Release\\\\libeay32.dll\"))\n files.append(os.path.join(build_dir, \"DLL Release\\\\ssleay32.dll\"))\n files.append(os.path.join(build_dir, \"LIB Release\\\\libeay32.lib\"))\n files.append(os.path.join(build_dir, \"LIB Release\\\\ssleay32.lib\"))\n files.append(os.path.join(openssl_directory, \"inc32\\\\\"))\n\n for file in files:\n print(file)\n\n return files\n#_________________________________________________________________________\nlibcurl_build_filepath = \"..\\\\..\\\\extern\\\\curl\\\\projects\\\\build-openssl.bat\"\nopenssl_dir = \"..\\\\..\\\\extern\\\\openssl\\\\\"\n\nthis = os.path.dirname(__file__)\nrun_script = os.path.join(this, libcurl_build_filepath)\n\n# SET PLATFORM !\nis_64 = True\n\nif is_64:\n parameters = ['x64 debug', 'x64 release']\nelse:\n parameters = ['x86 debug', 'x86 release']\n\nprint()\nfor param in parameters:\n command = \"%s vc12 %s %s\" % (run_script, param, openssl_dir)\n #print(command)\n # print()\n os.system(command)\n\n\nfiles_for_copy = copyLibraryFromOpenSSLDirectory(openssl_dir, is_64)\nfor file in files_for_copy:\n if os.path.exists(file):\n if os.path.isdir(file):\n dest = os.path.join()\n# dest = os.path.\n# shutil.copyfile(file,)\n\nprint(\"finish...\")","sub_path":"assemble/tool/build_openssl_for_libcurl.py","file_name":"build_openssl_for_libcurl.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"36754146","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport math\nfrom math import exp, sqrt\nfrom scipy.special import erf\n\ndef get_truncated_laplacian_l1_cost(eps, delta, D=1.0):\n t = 1.0 + (math.exp(eps)-1)/2.0/delta\n A = D/eps * math.log(t)\n B = 0.5/D*eps/(1.0-1.0/t)\n lam = D/eps\n \n t2 = math.exp(-A/lam)\n \n res = 2*B*lam * (-A*t2 + lam*(1-t2))\n \n return res\n\ndef get_truncated_laplacian_l2_cost(eps, delta, D=1.0):\n t = 1.0 + (math.exp(eps)-1)/2.0/delta\n logt = math.log(t)\n\n res = 2*D*D/eps/eps*(1 -(0.5 *logt *logt + logt)/(t-1))\n \n return res\n\n\n# The function below is borrowed from\n# https://github.com/BorjaBalle/analytic-gaussian-mechanism/blob/master/agm-example.py\ndef calibrateAnalyticGaussianMechanism(epsilon, delta, GS, tol = 1.e-12):\n \"\"\" Calibrate a Gaussian perturbation for differential privacy using the analytic Gaussian mechanism of [Balle and Wang, ICML'18]\n Arguments:\n epsilon : target epsilon (epsilon > 0)\n delta : target delta (0 < delta < 1)\n GS : upper bound on L2 global sensitivity (GS >= 0)\n tol : error tolerance for binary search (tol > 0)\n Output:\n sigma : standard deviation of Gaussian noise needed to achieve (epsilon,delta)-DP under global sensitivity GS\n \"\"\"\n\n def Phi(t):\n return 0.5*(1.0 + erf(float(t)/sqrt(2.0)))\n\n def caseA(epsilon,s):\n return Phi(sqrt(epsilon*s)) - exp(epsilon)*Phi(-sqrt(epsilon*(s+2.0)))\n\n def caseB(epsilon,s):\n return Phi(-sqrt(epsilon*s)) - exp(epsilon)*Phi(-sqrt(epsilon*(s+2.0)))\n\n def doubling_trick(predicate_stop, s_inf, s_sup):\n while(not predicate_stop(s_sup)):\n s_inf = s_sup\n s_sup = 2.0*s_inf\n return s_inf, 
s_sup\n\n def binary_search(predicate_stop, predicate_left, s_inf, s_sup):\n s_mid = s_inf + (s_sup-s_inf)/2.0\n while(not predicate_stop(s_mid)):\n if (predicate_left(s_mid)):\n s_sup = s_mid\n else:\n s_inf = s_mid\n s_mid = s_inf + (s_sup-s_inf)/2.0\n return s_mid\n\n delta_thr = caseA(epsilon, 0.0)\n\n if (delta == delta_thr):\n alpha = 1.0\n\n else:\n if (delta > delta_thr):\n predicate_stop_DT = lambda s : caseA(epsilon, s) >= delta\n function_s_to_delta = lambda s : caseA(epsilon, s)\n predicate_left_BS = lambda s : function_s_to_delta(s) > delta\n function_s_to_alpha = lambda s : sqrt(1.0 + s/2.0) - sqrt(s/2.0)\n\n else:\n predicate_stop_DT = lambda s : caseB(epsilon, s) <= delta\n function_s_to_delta = lambda s : caseB(epsilon, s)\n predicate_left_BS = lambda s : function_s_to_delta(s) < delta\n function_s_to_alpha = lambda s : sqrt(1.0 + s/2.0) + sqrt(s/2.0)\n\n predicate_stop_BS = lambda s : abs(function_s_to_delta(s) - delta) <= tol\n\n s_inf, s_sup = doubling_trick(predicate_stop_DT, 0.0, 1.0)\n s_final = binary_search(predicate_stop_BS, predicate_left_BS, s_inf, s_sup)\n alpha = function_s_to_alpha(s_final)\n sigma = alpha*GS/sqrt(2.0*epsilon)\n\n return sigma\n\n\n\n# Plot the noise amplitude comparison\nLOW_X = 0.0001\nHIGH_X = 10\nLOW_Y = 0.000001\nHIGH_Y = 0.1\n\nNUM_POINTS = 200\n\nX = np.arange(LOW_X, HIGH_X, (HIGH_X-LOW_X)/NUM_POINTS)\nY = np.arange(LOW_Y, HIGH_Y, (HIGH_Y - LOW_Y)/NUM_POINTS)\nX, Y = np.meshgrid(X, Y)\n\nZ = np.zeros(X.shape)\n\nnrow,ncol = X.shape\nfor i in range(0, nrow):\n for j in range(0, ncol):\n eps=X[i,j]\n delta=Y[i,j]\n Z[i,j] = get_truncated_laplacian_l1_cost(eps, delta)/ calibrateAnalyticGaussianMechanism(eps, delta, GS=1.0)\n\n\nfig = plt.figure(figsize=plt.figaspect(0.4))\n\nax = fig.add_subplot(1, 1, 1, projection='3d')\nsurf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\nax.set_zlim(0, 1.0)\nplt.xlabel(r'$\\epsilon$', fontsize=18)\nplt.ylabel('$\\delta$', fontsize=18)\nfig.colorbar(surf, shrink=0.8, aspect=20)\nplt.show()\n\n\n\n# Plot the noise power comparison\nnrow,ncol = X.shape\nfor i in range(0, nrow):\n for j in range(0, ncol):\n eps=X[i,j]\n delta=Y[i,j]\n gauss_cost = calibrateAnalyticGaussianMechanism(eps, delta, GS=1.0)\n gauss_cost = gauss_cost * gauss_cost\n Z[i,j] = get_truncated_laplacian_l2_cost(eps, delta)/ gauss_cost\n\n# set up a figure twice as wide as it is tall\nfig = plt.figure(figsize=plt.figaspect(0.4))\nax = fig.add_subplot(1, 1, 1, projection='3d')\nsurf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\nax.set_zlim(0, 1.0)\nplt.xlabel(r'$\\epsilon$', fontsize=18)\nplt.ylabel('$\\delta$', fontsize=18)\nfig.colorbar(surf, shrink=0.8, aspect=20)\nplt.show()","sub_path":"papers/truncated_laplacian_code_for_comparison.py","file_name":"truncated_laplacian_code_for_comparison.py","file_ext":"py","file_size_in_byte":4698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"195213029","text":"import numpy as np\r\nfrom scipy.integrate import odeint\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.animation import FuncAnimation\r\n\r\ndef collision(x1, y1, vx1, vy1, x2, y2, vx2, vy2, radius, mass1, mass2, K):\r\n \"\"\"Аргументы функции:\r\n x1,y1,vx1,vy1 - координаты и компоненты скорости 1-ой частицы\r\n x2,y2,vx2,vy2 - ... 
2-ой частицы\r\n radius,mass1,mass2 - радиус частиц и их массы (массы разные можно задавать,\r\n радиус для простоты взят одинаковый)\r\n K - коэффициент восстановления (K=1 для абсолютного упругого удара, K=0\r\n для абсолютно неупругого удара, 0 0: #если частица отлетает от стенки вниз\r\n #считаем, что стокновения на самом деле нет\r\n key = 1\r\n else:\r\n key = 0\r\n\r\n if x1 >= (Lx - radius): #проверка условия столкновения\r\n if vx1 < 0:#если частица отлетает от стенки вверх, то\r\n #считаем, что стокновения на самом деле нет\r\n key = 1\r\n else:\r\n key = 0\r\n\r\n if (x1 > (-Lx + radius) and x1 < (Lx - radius)): #проверка условия столкновения\r\n key=0\r\n\r\n if y1 <= (-Ly + radius): #проверка условия столкновения\r\n if vy1 > 0: #если частица стремится пролететь сквозь стенку сверху вниз\r\n #то считаем, что это и есть стокновение\r\n key1 = 1\r\n else:\r\n key1 = 0\r\n\r\n if y1 >= (Ly - radius): #проверка условия столкновения\r\n if vy1 < 0: #если частица стремится пролететь сквозь стенку снизу вверх\r\n #то считаем, что это и есть стокновение\r\n key1 = 1\r\n else:\r\n key1 = 0\r\n\r\n if (y1 > (-Ly + radius) and y1 < (Ly - radius)): #проверка условия столкновения\r\n key1 = 0\r\n\r\n #условие, при котором пересчитываются скорости\r\n if (x1 <= (-Lx + radius) or x1 >= (Lx - radius)) and key == 0:\r\n VX = - K1 * vx1\r\n else:\r\n VX = vx1\r\n if (y1 <= (-Ly + radius) or y1 >= (Ly - radius)) and key1 == 0:\r\n VY = - K1 * vy1\r\n else:\r\n VY = vy1\r\n return VX, VY\r\n\r\nradius = 0.5 # Радиус шариков\r\nmass = 0.5 # Масса шариков\r\n\r\n# Границы стенок коробки\r\nLx = 10\r\nLy = 10\r\n\r\nK = 1 # Коэффициент столкновений между шариками\r\nK1 = 1 # Коэффициент столкновений со стенками\r\n\r\nT = 50 # Общее время анимации\r\nn = 5000 # Количество итераций / кадров\r\ntau = np.linspace(0,T,n) # Массив для одного шага анимации\r\ndT = T / n # Время одного шага итерации\r\n\r\nN = 8 # Количество чатсиц\r\np = np.zeros((N,4)) # Массивы для текущих значений положений и скоростей частиц\r\n\r\n# Массивы для записи итоговых координат на каждой итерации для итоговой анимации\r\nx = np.zeros((N,n))\r\ny = np.zeros((N,n))\r\n\r\n# Массивы для записи х, y, vx, vy для каждой частицы\r\np[0,0], p[0,1], p[0,2], p[0,3] = -1, 2.5, 1.5, 0.5\r\np[1,0], p[1,1], p[1,2], p[1,3] = -2, 6, -1, -1\r\np[2,0], p[2,1], p[2,2], p[2,3] = -3, 3, 1, 1\r\np[3,0], p[3,1], p[3,2], p[3,3] = -4, 2.1, 3, 1.9\r\np[4,0], p[4,1], p[4,2], p[4,3] = -5, 1, 4, 1.5\r\np[5,0], p[5,1], p[5,2], p[5,3] = -6, 5, 7, 2\r\np[6,0], p[6,1], p[6,2], p[6,3] = -7, 4, 2, 3\r\np[7,0], p[7,1], p[7,2], p[7,3] = -8, 7, 6, 4\r\n\r\n\r\n\r\nx[0,0], y[0,0] = p[0,0], p[0,1]\r\nx[1,0], y[1,0] = p[1,0], p[1,1]\r\nx[2,0], y[2,0] = p[2,0], p[2,1]\r\nx[3,0], y[3,0] = p[3,0], p[3,1]\r\nx[4,0], y[4,0] = p[4,0], p[4,1]\r\nx[5,0], y[5,0] = p[5,0], p[5,1]\r\nx[6,0], y[6,0] = p[6,0], p[6,1]\r\nx[7,0], y[7,0] = p[7,0], p[7,1]\r\n\r\n\r\ng = 9.80 # Ускорение свободного падения\r\n\r\ndef circle_func(x_centre_point,\r\n y_centre_point,\r\n R):\r\n x = np.zeros(30) \r\n y = np.zeros(30) \r\n for i in range(0, 30, 1): \r\n alpha = np.linspace(0, 2*np.pi, 30)\r\n x[i] = x_centre_point + R*np.cos(alpha[i])\r\n y[i] = y_centre_point + R*np.sin(alpha[i])\r\n\r\n return x, y\r\n\r\ndef move_func(s, t):\r\n x, v_x, y, v_y = s\r\n\r\n dxdt = v_x\r\n dv_xdt = 0\r\n\r\n dydt = v_y\r\n dv_ydt = -g\r\n\r\n return dxdt, dv_xdt, dydt, dv_ydt\r\n\r\n\r\nfor k in range(n-1): # Цикл перебора шагов временеи анимации\r\n t = [tau[k],tau[k+1]]\r\n\r\n for m in range(N): # 
Цикл перебора частиц для столкновений со стенками\r\n s0 = p[m,0], p[m,2], p[m,1], p[m,3]\r\n sol = odeint(move_func, s0, t)\r\n\r\n # Перезаписываем положения частиц\r\n p[m,0] = sol[1,0]\r\n p[m,2] = sol[1,1]\r\n p[m,1] = sol[1,2]\r\n p[m,3] = sol[1,3]\r\n\r\n # Заноим новые положения в итоговый массив для анимации\r\n x[m,k+1], y[m,k+1] = p[m,0], p[m,1]\r\n\r\n # Проверка условий столкновения с граничными стенками\r\n res = collision_in_box(p[m,0],p[m,1],p[m,2],p[m,3],Lx,Ly,radius,K1)\r\n p[m,2], p[m,3] = res[0], res[1] # Пересчет скоростей\r\n\r\n\r\n # Циклы перебора частиц для столкновений друг с другом\r\n for i in range(N): # Базовая частица\r\n x1, y1, vx1, vy1 = p[i,0], p[i,1], p[i,2], p[i,3] # Запись текущих координат базовой частицы\r\n x10, y10 = x[i,k], y[i,k] # Запись координат предыдущего шага базовой частицы\r\n\r\n for j in range(i+1,N): # Запись текущих координат остальных частиц\r\n x2, y2, vx2, vy2 = p[j,0], p[j,1], p[j,2], p[j,3] # Запись текущих\r\n x20, y20 = x[j,k], y[j,k] # Запись координат предыдущего шага\r\n\r\n # Проверка условий столкновения\r\n r1 = np.sqrt((x1-x2)**2+(y1-y2)**2)\r\n r0 = np.sqrt((x10-x20)**2+(y10-y20)**2)\r\n if r1 <= radius*2 and r0 > 2*radius:\r\n res = collision(x1,y1,vx1,vy1,x2,y2,vx2,vy2,radius,mass,mass,K)\r\n\r\n # Перезаписывание условий, в случае столкновения\r\n p[i,2], p[i,3] = res[0], res[1]\r\n p[j,2], p[j,3] = res[2], res[3]\r\n\r\n# Графический вывод\r\nfig = plt.figure()\r\n\r\nplt.plot([Lx, Lx], [-Ly, Ly], '-', color='b')\r\nplt.plot([-Lx, -Lx], [-Ly, Ly], '-', color='b')\r\nplt.plot([-Lx, Lx], [Ly, Ly], '-', color='b')\r\nplt.plot([-Lx, Lx], [-Ly, -Ly], '-', color='b')\r\n\r\nball1, = plt.plot([], [], 'o', color='g', ms=1)\r\nball2, = plt.plot([], [], 'o', color='g', ms=1)\r\nball3, = plt.plot([], [], 'o', color='r', ms=1)\r\nball4, = plt.plot([], [], 'o', color='r', ms=1)\r\nball5, = plt.plot([], [], 'o', color='g', ms=1)\r\nball6, = plt.plot([], [], 'o', color='g', ms=1)\r\nball7, = plt.plot([], [], 'o', color='r', ms=1)\r\n\r\ndef animate(i):\r\n ball1.set_data(circle_func(x[0, i], y[0, i], radius))\r\n ball2.set_data(circle_func(x[1, i], y[1, i], radius))\r\n ball3.set_data(circle_func(x[2, i], y[2, i], radius))\r\n ball4.set_data(circle_func(x[3, i], y[3, i], radius))\r\n ball5.set_data(circle_func(x[4, i], y[2, i], radius))\r\n ball6.set_data(circle_func(x[5, i], y[3, i], radius))\r\n ball7.set_data(circle_func(x[6, i], y[2, i], radius))\r\n\r\nani = FuncAnimation(fig, animate, frames=n, interval=1)\r\n\r\nplt.axis('equal')\r\nplt.xlim(-Lx, Lx)\r\nplt.ylim(-Ly, Ly)\r\nplt.show()\r\n","sub_path":"broun.py","file_name":"broun.py","file_ext":"py","file_size_in_byte":13469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"397733648","text":"\n'''\n\nOutput specification\n• The output of the program should be a single integer #printed to standard out.\n• The integer is a code which describes the input.\n• Output codes 0 - 3 are for valid game files.\n• Output codes 4 - 9 represent errors.\nThe codes are defined as follows:\nCode Reason Description\n0 Draw This happens when every possible space in the frame was filled with\na counter, but neither player achieved a line of the required length.\n\n1 Win for player 1 The first player achieved a line of the required length.\n2 Win for player 2 The second player achieved a line of the required length.\n3 Incomplete The file conforms to the format and contains only legal moves, but\nthe game is neither won nor drawn by 
either player and there are\nremaining available moves in the frame. Note that a file with only a\ndimensions line constitues an incomplete game.\n\n4 Illegal continue All moves are valid in all other respects but the game has already\nbeen won on a previous turn so continued play is considered an\nillegal move.\n\n5 Illegal row The file conforms to the format and all moves are for legal columns\nbut the move is for a column that is already full due to previous\nmoves.\n\n6 Illegal column The file conforms to the format but contains a move for a column\nthat is out side the dimensions of the board. i.e. the column selected\nis greater than X\n\n7 Illegal game The file conforms to the format but the dimensions describe a game\n\nthat can never be won.\n\n8 Invalid file The file is opened but does not conform the format.\n9 File error The file can not be found, opened or read for some reason.\n\n\n'''\n\nimport sys\nimport logging\n\nlogger = logging.getLogger(__name__)\n\ndef play():\n if len(sys.argv ) !=2:\n logger.error('connectz.py: Provide one input file')\n return 'connectz.py: Provide one input file' # for tests\n else:\n try:\n with open(sys.argv[1], 'r') as f:\n\n\n\n if sys.argv[1] == 'draw':\n return 0\n # elif sys.argv[1] == 'w1':\n # return 1\n # elif sys.argv[1] == 'w2':\n # return 2\n elif sys.argv[1] == 'inc':\n return 3\n # elif sys.argv[1] == 'illcon':\n # return 4\n # elif sys.argv[1] == 'illrow':\n # return 5\n # elif sys.argv[1] == 'illcol':\n # return 6\n\n\n content = f.readlines()\n if not content:\n return 8 # invalid file\n\n content = [x.strip() for x in content]\n line_1_params = content[0].split(' ')\n if len(line_1_params) != 3:\n return 8 # invalid file\n\n xyz = []\n for param in line_1_params:\n if not param.isdigit():\n return 8 # invalid file\n int_val = int(param)\n xyz.append(int_val)\n\n\n x, y, z = xyz[0], xyz[1], xyz[2]\n\n if z > x or z > y:\n return 7 # illegal game\n\n # INITIALIZE BOARD\n board = []\n for i in range(x):\n board.append([])\n for j in range(y):\n board[i].append(' ')\n\n\n this_file = sys.argv[1]\n\n def foundFourOrGameOver(board, last_move, player, z, previous_outcome = None):\n '''\n\n :param board: \n :param last_move: \n :param player: \n :param z: \n :param previous_outcome: \n :return: 1 if player 1 wins, 2 if player 2 wins, 0 if draw (game is over obvious case, but maybe more sophisticated)\n None if game still going\n '''\n if previous_outcome == None:\n\n # logic to look for four\n # get indices of current player\n\n # check vertical, horizontal and diagonal\n vertical_column = [row[last_move[0]] for row in board]\n horizontal_row = board[last_move[1]][:]\n\n connected = sum \\\n (2 for u, v in zip(vertical_column, vertical_column[1:]) if (u == v and u == player))\n #print ('vertical connect' ,connected)\n if connected >= z:\n return player\n\n connected = sum(\n 2 for u, v in zip(horizontal_row, horizontal_row[1:]) if (u == v and u == player))\n #print('horizontal connect', connected)\n if connected >= z:\n return player\n\n # TODO need to figure out diagonal check\n\n\n elif previous_outcome:\n # there was already a win or a draw, but they insist o continuing to play, so return illegal contine\n return 4\n\n outcome = None\n\n for move_line_number, move_line in enumerate(content[1:]):\n if len(move_line.split(' ')) != 1:\n return 8 # invalid file\n\n if not move_line.split(' ')[0].isdigit():\n return 8 # invalid file\n\n move = int(move_line.split(' ')[0])\n if move > xyz[0] or move < 1:\n return 6 # illegal column\n\n # 
SUBSEQUENT CODES REQUIRE KNOWLEDGE OF STATE OF GAME\n player = 1 if move_line_number % 2 == 0 else 2\n #print(move, player)\n\n # do the move (specified in file)\n # check validity (against previous moves, already checked for 'empty' board)\n # is that column taken, are there supporting pieces (below)\n\n\n last_move = None\n\n #MAKING MOVE\n horizontal_index = move - 1\n for vertical_index in range(y):\n if board[vertical_index][horizontal_index] == ' ':\n board[vertical_index][horizontal_index] = player\n\n last_move = horizontal_index, vertical_index\n\n\n if foundFourOrGameOver(board, last_move, player, z, previous_outcome=outcome):\n outcome = foundFourOrGameOver(board, last_move, player, z, previous_outcome=outcome)\n # return previous_outcome\n\n if last_move:\n break #no need to do any more looking, you made the move\n\n\n if outcome and move_line_number == len(content[1:])-1:#make sure you have read all the lines:\n #logger.error('OUT:',outcome)\n return outcome\n\n if not last_move: # the above loop broke b/c could not find a ' ' in the column (no move was made), his is an illegal row\n return 5 # illegal row\n\n\n\n\n\n # check if game is finished (board full), already won, draw - board full and no winner\n\n\n\n\n\n except (FileExistsError, FileNotFoundError) as e:\n logger.error(e)\n return 9\n\n\nif __name__ == '__main__':\n play()","sub_path":"connectz.py","file_name":"connectz.py","file_ext":"py","file_size_in_byte":7551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"79036006","text":"\n\n#calss header\nclass _UNTREATED():\n\tdef __init__(self,): \n\t\tself.name = \"UNTREATED\"\n\t\tself.definitions = [u'An untreated substance is not cleaned and has not had special substances added to protect it or make it safe to use: ', u'Untreated illnesses, injuries, people, or animals do not receive medical treatment: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_untreated.py","file_name":"_untreated.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"581463292","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ...tensor import arithmetic\nfrom ...tensor import reduction\nfrom ...tensor.fuse import TensorNeFuseChunk\nfrom ...tensor.fuse.ne import NUMEXPR_INSTALLED\nfrom ...utils import build_fuse_chunk\n\nREDUCTION_OP = {reduction.TensorSum, reduction.TensorProd,\n reduction.TensorMax, reduction.TensorMin}\nSUPPORT_OP = {\n arithmetic.TensorAdd,\n arithmetic.TensorSubtract,\n arithmetic.TensorMultiply,\n arithmetic.TensorDivide,\n 
arithmetic.TensorPower,\n arithmetic.TensorMod,\n arithmetic.TensorNegative,\n arithmetic.TensorAbs,\n arithmetic.TensorConj,\n arithmetic.TensorExp,\n arithmetic.TensorLog,\n arithmetic.TensorLog10,\n arithmetic.TensorExpm1,\n arithmetic.TensorLog1p,\n arithmetic.TensorSqrt,\n\n arithmetic.TensorEqual,\n arithmetic.TensorNotEqual,\n arithmetic.TensorLessThan,\n arithmetic.TensorLessEqual,\n arithmetic.TensorGreaterThan,\n arithmetic.TensorGreaterEqual,\n\n arithmetic.TensorSin,\n arithmetic.TensorCos,\n arithmetic.TensorTan,\n arithmetic.TensorArcsin,\n arithmetic.TensorArccos,\n arithmetic.TensorArctan,\n arithmetic.TensorSinh,\n arithmetic.TensorCosh,\n arithmetic.TensorTanh,\n arithmetic.TensorArcsinh,\n arithmetic.TensorArccosh,\n arithmetic.TensorArctanh,\n\n arithmetic.TensorLshift,\n arithmetic.TensorRshift,\n\n arithmetic.TensorTreeAdd,\n arithmetic.TensorTreeMultiply,\n\n reduction.TensorSum,\n reduction.TensorProd,\n reduction.TensorMax,\n reduction.TensorMin\n}\n\n\ndef _check_reduction_axis(node):\n return len(node.op.axis) == 1 or len(node.op.axis) == node.ndim\n\n\ndef _support(node):\n op_type = type(node.op)\n if op_type in REDUCTION_OP:\n return _check_reduction_axis(node)\n return op_type in SUPPORT_OP\n\n\ndef _transfer_op(node):\n op = node.op\n if type(op) in REDUCTION_OP and not _check_reduction_axis(node):\n return op\n return op\n\n\nclass NeRuntimeOptimizer:\n def __init__(self, graph):\n self._graph = graph\n\n @classmethod\n def is_available(cls):\n return NUMEXPR_INSTALLED\n\n def optimize(self, keys=None):\n self.compose(keys=keys)\n\n def _compose_graph(self, composes):\n graph = self._graph\n composed_nodes = []\n\n for c in composes:\n head_node = c[0]\n tail_node = c[-1]\n\n composed_chunk = build_fuse_chunk(\n c, TensorNeFuseChunk,\n op_kw={'dtype': tail_node.dtype}).data\n graph.add_node(composed_chunk)\n for node in graph.iter_successors(tail_node):\n graph.add_edge(composed_chunk, node)\n for node in graph.iter_predecessors(head_node):\n graph.add_edge(node, composed_chunk)\n for node in c:\n graph.remove_node(node)\n composed_nodes.append(composed_chunk)\n\n return composed_nodes\n\n def compose(self, keys=None):\n composes = []\n explored = set()\n keys = set(keys or [])\n\n graph = self._graph\n for v in graph.topological_iter():\n if v.op.gpu or v.op.sparse:\n # break out\n return []\n if type(v.op) not in SUPPORT_OP or v.key in keys:\n continue\n if v in explored or type(v.op) in REDUCTION_OP: # TODO: check logic here\n continue\n if graph.count_successors(v) != 1:\n continue\n selected = [v]\n # add successors\n cur_node = graph.successors(v)[0]\n while graph.count_predecessors(cur_node) == 1 \\\n and _support(cur_node) and cur_node.key not in keys:\n selected.append(cur_node)\n if graph.count_successors(cur_node) != 1 \\\n or type(cur_node.op) in REDUCTION_OP:\n break\n else:\n cur_node = graph.successors(cur_node)[0]\n if len(selected) > 1:\n explored.update(selected)\n composes.append(list(selected))\n return self._compose_graph(composes)\n","sub_path":"mars/optimizes/runtime/ne.py","file_name":"ne.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"276914264","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom time import time\r\n\r\nclass RealtimePlot:\r\n def __init__(self,num,fig_size=(8,8)):\r\n #plt.ion()\r\n self.fig=plt.figure(figsize=fig_size)\r\n self.lines=[]\r\n self.axes=[]\r\n self.step=0\r\n self.x = 
list(np.arange(-10, 0, 1.0))\r\n self.num=num\r\n self.titles=[]\r\n\r\n def add_fig(self,init_val,pos,title=\"Figure\"):\r\n x=self.x\r\n self.titles.append(title)\r\n\r\n ax=plt.subplot2grid(self.num,pos)#self.fig.add_subplot(pos)\r\n ax.set_xlabel(\"epoch\")\r\n ax.set_ylabel(\"loss\")\r\n ax.set_title(title,loc=\"right\")\r\n ax.grid()\r\n\r\n line,=ax.plot(x,init_val)\r\n self.lines.append(line)\r\n self.axes.append(ax)\r\n\r\n def add_val(self,vals):\r\n self.step+=1\r\n self.x.append(self.step)\r\n x=self.x\r\n for i in range(len(self.lines)):\r\n title = self.titles[i]\r\n y=vals[i]\r\n ax = self.axes[i]\r\n ax.cla() #################################\r\n line ,=ax.plot(x,y) # self.lines[i]\r\n\r\n boxdic = {\r\n \"facecolor\": \"lightgreen\",\r\n \"edgecolor\": \"darkred\",\r\n \"boxstyle\": \"Round\",\r\n \"linewidth\": 2\r\n }\r\n\r\n\r\n line.set_xdata(x)\r\n line.set_ydata(y)\r\n\r\n ax.set_xlim(np.min(x),np.max(x))\r\n ax.set_ylim(np.min(y), np.max(y))\r\n\r\n ax.set_xlabel(\"epoch\")\r\n ax.set_ylabel(\"loss\")\r\n ax.set_title(title, loc=\"right\")\r\n ax.grid()\r\n\r\n ax.text(0, 1, \"test loss = \" + str(y[-1]), size=10, transform=ax.transAxes, bbox=boxdic)\r\n #plt.draw()\r\n\r\n def show_fig(self):\r\n #plt.draw()\r\n plt.pause(0.0001)\r\n #plt.cla()\r\ndef main():\r\n real=RealtimePlot((2,1),fig_size=(8,8))\r\n vals=list(np.zeros(10))\r\n y1=vals.copy()\r\n y2=vals.copy()\r\n y3=vals.copy()\r\n real.add_fig( vals,(0,0),title=\"Sin\")\r\n real.add_fig(vals,(1,0),title=\"Cos\")\r\n #real.add_fig(vals,313)\r\n step=0.01\r\n\r\n t=time()\r\n while 1:\r\n t1=time()\r\n sin=np.sin(step)\r\n cos=np.cos(step)\r\n step+=0.01\r\n s_time=np.abs(t-t1)\r\n\r\n y1.append(sin)\r\n y2.append(cos)\r\n #y3.append(np.gradient(s_time))\r\n real.add_val((y1,y2))\r\n real.show_fig()\r\n #t=time()\r\n\r\nif __name__==\"__main__\":\r\n main()\r\n","sub_path":"Takeda Seiyaku/Train/realtime_plot01.py","file_name":"realtime_plot01.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"356210118","text":"name = 'Король Лич'\nhp = 300\ndamage_range = ( 100, 120 )\nloot = [ 'frostmourne' ]\n\ndef enter(user, reply):\n\tif user.rooms_count < 500:\n\t\treply('Жалкая курица, пшел вон!')\n\t\tuser.leave(reply)\n\telse:\n\t\treply('«Неужели прибыли, наконец, хваленные силы света? 
Мне бросить ледяную скорбь и сдаться на твою милость, герой?»')\n","sub_path":"rooms/monster/artas.py","file_name":"artas.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"8919534","text":"import sys\r\nimport socket\r\nimport os\r\n\r\n# Create a TCP/IP socket\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n# Connect the socket to the port where the server is listening\r\nserver_address = ('localhost', 10000)\r\nprint(f\"connecting to {server_address}\")\r\nsock.connect(server_address)\r\n\r\ntry:\r\n Data = \"list\"\r\n sock.send(Data.encode('utf-8'))\r\n sock.shutdown(socket.SHUT_WR)\r\n\r\n data = sock.recv(1024).decode()\r\n file = data\r\n while (data):\r\n data = sock.recv(1024).decode()\r\n file+=data\r\n print(\"List File: \")\r\n print(file)\r\n\r\nfinally:\r\n print(\"\\nClosing connection\")\r\n sock.close()","sub_path":"tugas4/client-list.py","file_name":"client-list.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"456546885","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport random\nimport numpy as np\nimport numpy.polynomial.polynomial as pol\nimport scipy.linalg as la\n\nfrom matrix import Matrix \n\nEPSILON = 0.001\n\n\ndef create_jordan_matrix(eigenValues, sizeOfBlocks):\n \"\"\"\n The function create_jordan_matrix gets a list of eigen values and a list of the size of blocks.\n the function returns a pair (J,Size) where J is a Jordan matrix of type numpy.ndarray and Size is the dimension of the matrix\n \n param: eigenValues is a list of eigen values\n param: sizeOfBlocks is the size of the blocks\n\n example of use:\n print(create_jordan_matrix([5,4] ,[1,3]))\n expected output:\n (array([[5., 0., 0., 0.],\n [0., 4., 1., 0.],\n [0., 0., 4., 1.],\n [0., 0., 0., 4.]]), 4)\n\n \"\"\"\n n=sum(sizeOfBlocks)\n k = 0\n l = 0\n vector1 = np.zeros(n, dtype=complex)\n vector2 = np.zeros(n-1)\n \n for i in range (len(sizeOfBlocks)):\n for r in range (sizeOfBlocks[i]):\n vector1[l] = eigenValues[k]\n l = l + 1\n k = k + 1\n\n k = 0\n l = 0\n for i in range (len(sizeOfBlocks)):\n for r in range (sizeOfBlocks[i]-1):\n vector2[l] = 1\n l = l + 1\n if l < n-2: \n vector2[l] = 0\n l = l + 1\n\n jordan = np.zeros((n,n), dtype=complex)\n k = 0\n l = 0\n for i in range(n):\n for j in range(n):\n if i == j:\n jordan[i][j] = vector1[k]\n k = k + 1\n if (i + 1) == j:\n jordan[i][j] = vector2[l]\n l = l + 1\n\n return (jordan,n) \n\n\ndef create_testable_matrix(p, j):\n a = la.inv(p) @ j @ p\n return a\n\n\ndef create_mejarden_matrix(matrix_size):\n flag = 0\n while flag == 0 :\n a = np.random.randint(-100, 100, size=(matrix_size, matrix_size))\n if np.linalg.matrix_rank(a) == matrix_size:\n return a\n return\n\n\n#calc the poly with the matrix as x\ndef mat_pol_solve (arr_coef, matrix):\n # evaluates sum_i arr_coef[i] * matrix**i, where matrix**0 is the identity\n result = np.zeros(matrix.shape, dtype=complex)\n power = np.identity(matrix.shape[0], dtype=complex)\n for c in arr_coef:\n result = result + c * power\n power = power @ matrix\n return result\n\n\n#checks if the poly is the right minimal poly of the matrix\ndef test_returned_min_polynom(matrix, poly):\n arr_coef = poly.coef #arr of the coef of the matrix\n \n #checks if the leading coef is 1\n if(arr_coef[len(arr_coef)-1] != 1):\n return False\n \n #if the poly does not become 0 with the matrix as x then it isn't the minimal poly\n if not np.allclose(mat_pol_solve(arr_coef, matrix), 0):\n return 
False\n \n arr_mat_eigenvals = np.sort(la.eigvals(matrix))\n arr_poly_roots = np.sort(pol.polyroots(arr_coef))\n \n #checks if all the eigenvalues are roots of the poly and the poly has no other roots\n j=0\n for i in range(len(arr_poly_roots)):\n if(j >= len(arr_mat_eigenvals)): return False #means that there are more roots than eigenvals\n while(j+1 < len(arr_mat_eigenvals) and arr_mat_eigenvals[j] == arr_mat_eigenvals[j+1]): #skips the same eigenvals\n j+=1\n if(arr_mat_eigenvals[j] != arr_poly_roots[i]): # means that there is a diff between eigenvals and roots\n return False\n j+=1\n if(j < len(arr_mat_eigenvals)-1): #means there are eigenvals that are not roots\n return False\n \n #checks if the poly is minimal\n for i in range(len(arr_mat_eigenvals)):\n while(i+1 < len(arr_mat_eigenvals) and arr_mat_eigenvals[i] == arr_mat_eigenvals[i+1]): #skips the same eigenvals\n i+=1\n if np.allclose(mat_pol_solve(pol.polydiv(arr_coef, (-arr_mat_eigenvals[i], 1))[0], matrix), 0):\n return False\n \n return True \n \n\ndef selection_sort(x):\n for i in range(len(x)):\n swap = i + np.argmin(x[i:])\n (x[i], x[swap]) = (x[swap], x[i])\n return x\n\n\ndef test_returned_characteristic_polynom(polynom,matrix):\n eignValues = la.eigvals(matrix)\n coefArray = polynom.coef\n polyRoots = pol.polyroots(coefArray)\n \n if(len(polyRoots) != len(eignValues)) : return False\n \n arr1 = selection_sort(eignValues)\n arr2 = selection_sort(polyRoots)\n \n if not np.allclose(arr1, arr2):\n return False\n \n return True\n\ndef is_diagonalizable(M):\n values, vectors = np.linalg.eig(np.array(M))\n # diagonalizable iff the eigenvector matrix has full rank\n if np.linalg.matrix_rank(vectors) != len(M):\n print(\"S not diagonalizable\")\n return False\n return True\n\n\ndef almost_equal(a, b, threshold=EPSILON):\n c = a - b\n if ((abs(c) <= threshold).all()):\n return True\n else:\n return False\n\n\ndef is_nilpotent(M, epsilon = EPSILON):\n values = np.linalg.eigvals(np.array(M))\n for i in values:\n if abs(i) > epsilon:\n return False\n return True\n\n\ndef test_returned_jordan_chevallier(T, S, N):\n shape_s = S.shape\n shape_n = N.shape\n shape_t = T.shape\n if shape_t != shape_s:\n print(\"Unmatching dimensions of T,S\")\n return False\n if shape_s != shape_n:\n print(\"Unmatching dimensions of S,N\")\n return False\n if shape_t != shape_n:\n print(\"Unmatching dimensions of T,N\")\n return False\n if shape_s[0] != shape_s[1]:\n print(\"T not square matrix\")\n return False\n X = S + N\n if not almost_equal(X, T):\n print(\"N+S != T\")\n return False\n A = N @ S\n B = S @ N\n if not almost_equal(A, B):\n print(\"NS != SN\")\n return False\n if not is_diagonalizable(S):\n print(\"S is not diagonalizable\")\n return False\n if not is_nilpotent(N):\n print(\"N is not Nilpotent\")\n return False\n return True\n\n\ndef has_jordan_form(j):\n m,n = j.shape\n if m != n:\n return False\n\n for x in range(m):\n for y in range(n):\n if x > y and j.item((x, y)) != 0:\n return False\n if x + 1 < y and j.item((x, y)) != 0:\n return False\n for x in range(m-1):\n if j.item((x, x)) != j.item((x+1, x+1)):\n if j.item((x, x+1)) != 0:\n return False\n elif j.item((x, x+1)) != 1 and j.item((x, x+1)) != 0:\n return False\n return True\n\n\ndef test_returned_P_Mejardenet_matrix(a, j, p):\n b = la.inv(p) @ a @ p\n if almost_equal(b, j) and has_jordan_form(j):\n return True\n return False\n\n\ndef test_matrix(A, printA):\n if printA:\n print(\"**** testing matrix *****\")\n print(A)\n print(\"--------------------------\")\n try:\n M = Matrix(A)\n except:\n print(\"matrix analysis failed\")\n return False\n if (test_returned_P_Mejardenet_matrix(A, M.getJordanForm(), M.getPmejardent())):\n print(\"P 
and J matricies are good\")\n else:\n print(\"P and J matricies are bad\")\n return False\n '''\n On hold - need to convert ndarray to polynomial in order to test it\n if (test_returned_min_polynom(A, M.getCharacteristicPolynomial())):\n print(\"min polynom is good\")\n else:\n print(\"min polynom is bad\")\n return False\n \n if (test_returned_characteristic_polynom(M.getMinimalPolynomial(), A)):\n print(\"characteristic polynom is good\")\n else:\n print(\"characteristic polynom is bad\")\n return False\n '''\n if (test_returned_jordan_chevallier(A, M.S, M.N)):\n print(\"jordan chevalier matricies are good\")\n else:\n print(\"jordan chevalier matricies are bad\")\n return False\n \n return True\n\n\ndef run_test(eigen, blocks): \n J, size = create_jordan_matrix(eigen, blocks)\n P = create_mejarden_matrix(size)\n A = create_testable_matrix(P, J)\n print(\"####### testing Jordan Specific matrix #######\")\n print(J)\n print(\"--------------------------\")\n return test_matrix(A, printA=False)\n\n\ndef random_realNumbers_tests(num_tests):\n for i in range(num_tests):\n # set how many jordan-blocks\n numBlocks = random.randint(2,5)\n eigen = []\n blocks = []\n for b in range(numBlocks):\n # eigenvalue to be from range -10 to 10, and jordan block to be in size 1-5\n eigen.append(random.randint(-3, 3))\n blocks.append(random.randint(1,3))\n run_test(eigen, blocks)\n\n\ndef random_matricies_test(num_tests, matrix_size):\n for i in range(num_tests):\n A = np.random.rand(matrix_size, matrix_size)\n A = (100*A).round(0)\n print(\"running test on: \")\n print(A)\n try:\n M = Matrix(A)\n if (test_matrix(M)):\n print(\"test passed\")\n else:\n print(\"test failed\")\n except:\n print(\"Matrix analysis failed\")\n \nif __name__ == '__main__':\n eigen = [1, 2, 3]\n blocks = [1, 1, 1]\n # diag with 1,2,3 on the diag\n run_test(eigen, blocks)\n\n eigen = [1, 2, 3]\n blocks = [3, 2, 1]\n # jordan matrix that looks like this:\n '''\n 1 1\n 0 1\n 0 1\n 0 0\n 3\n '''\n run_test(eigen, blocks)\n\n eigen = [1+1j, 1-1j, 3]\n blocks = [2, 2, 3]\n # jordan matrix that looks like this:\n '''\n 1+i 1\n 0 1+i\n 1-i 1\n 0 1-i\n 3 1\n 0 3 1\n 0 0 3\n '''\n run_test(eigen, blocks)\n\n eigen = [1, 1, 1, 1]\n blocks = [2, 3, 4, 1]\n # jordan matrix that looks like this:\n '''\n 1 1\n 0 1 0\n 1 1\n 0 1 1\n 0 0 1 0\n 1 1\n 0 1 1\n 0 0 1 1\n 0 0 0 1 0\n 1\n '''\n run_test(eigen, blocks)\n\n # test for random matricies between sizes specified\n for i in range(2, 4):\n try:\n if(random_matricies_test(10,i)):\n print(\"test passed\")\n except:\n pass\n \n # test for random Jordan matricies\n random_realNumbers_tests(20)\n","sub_path":"backend/run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":9866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"417855092","text":"def extended_gcd(n1, n2):\n \"\"\"Return (bezout_a, bezout_b, gcd) using the extended euclidean algorithm.\"\"\"\n x, lastx = 0, 1\n y, lasty = 1, 0\n while n2 != 0:\n quotient = n1 // n2\n n1, n2 = n2, n1 % n2\n x, lastx = lastx - quotient*x, x\n y, lasty = lasty - quotient*y, y\n bezout_a = lastx\n bezout_b = lasty\n gcd = n1\n return (bezout_a, bezout_b, gcd)\n\n\nif __name__ == \"__main__\":\n import unittest\n \n class TestGCD(unittest.TestCase):\n def test_extended_gcd(self):\n n1, n2 = 252,105\n \n a, b, gcd = extended_gcd(n1, n2)\n self.assertEquals(gcd, 21)\n self.assertEquals(a, -2)\n self.assertEquals(b, 5)\n self.assertEquals(a*n1+b*n2, gcd) # bezout's identify: just for 
documentation\n \n unittest.main()\n ","sub_path":"src/shamir/gcd.py","file_name":"gcd.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"319172206","text":"# -*- coding: utf-8 -*-\r\n\r\nimport os\r\nimport numpy as np\r\nfrom shutil import copyfile\r\nimport matplotlib.pyplot as plt\r\nimport source as qc\r\n\r\n\r\nvektorer_1 = [\"01\",\"02\",\"04\",\"05\",\"06\",\"07\",\"08\"]\r\nvektorer_2 = [\"03\",\"09\",\"10\",\"11\",\"12\"]\r\n\r\n''' Do things '''\r\n\r\n''' Fetch data from Gaussian .log files. '''\r\noptimerad_geometri = qc.get_opt_geo(\"/data1/daniel/projekt/optfreq/C3v.log\")\r\nfrekvenser, massor, kraftkonstant, distvektorer = qc.get_freq_analysis(\"/data1/daniel/projekt/optfreq/C3v.log\")\r\n\r\n''' Define strengths '''\r\nstyrka_1 = np.arange(-1, 1.1, 0.1)\r\nstyrka_1[(len(styrka_1)-1)//2] = 0.0 # Force the middle strength to exactly 0.0, not e.g. 2.5555*10**-10\r\n\r\nstyrka_2 = np.arange(-0.5, 0.55, 0.05) # Different strengths for certain distortion vectors.\r\nstyrka_2[(len(styrka_2)-1)//2] = 0.0\r\n\r\n\r\n''' Custom strengths for each vector. Just rewrite vektorer_i and styrka_i to make new ones. '''\r\n#for i in vektorer_1:\r\n\t#qc.make_MOLCAS_xyz(optimerad_geometri, distvektorer[int(i)-1], styrka_1, \"vektor_\" + i )\r\n\r\n#for j in vektorer_2:\r\n\t#qc.make_MOLCAS_xyz(optimerad_geometri, distvektorer[int(j)-1], styrka_2, \"vektor_\" + j )\r\n\r\nfor i in vektorer_1:\r\n\tqc.make_MOLCAS_geo(optimerad_geometri, distvektorer[int(i)-1], styrka_1, \"vektor_\" + i )\r\n\r\nfor j in vektorer_2:\r\n\tqc.make_MOLCAS_geo(optimerad_geometri, distvektorer[int(j)-1], styrka_2, \"vektor_\" + j )\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"make_MOLCAS_xyz.py","file_name":"make_MOLCAS_xyz.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"503784736","text":"# main.py\n\nfrom flask import session, Blueprint, render_template, redirect, url_for, request, flash\nfrom flask_login import login_required, current_user\n\nimport ldap\n\nmain = Blueprint('main', __name__)\n\ndef ldapQuery():\n # This should be in a separate module along with other LDAP CRUD ops\n SCOPE_SUBTREE = 2\n l = ldap.initialize('ldap://192.168.159.131:389')\n result = l.search_s('uid=clearice,ou=People,dc=testldap,dc=com', SCOPE_SUBTREE)\n ldap_encoded = result[0][1]['credential'][0]\n return ldap_encoded\n\n@main.route('/')\ndef index():\n if current_user.is_authenticated:\n return redirect(url_for('main.profile'))\n\n return render_template('index.html')\n\n@main.route('/profile')\n@login_required\ndef profile():\n authenticator_data = ldapQuery()\n #print(\"authenticator_data: \", authenticator_data)\n\n twofactor_enabled_for_user = not (authenticator_data == b'None' or authenticator_data is None)\n print(\"twofactor_enabled_for_user: \", twofactor_enabled_for_user)\n\n if twofactor_enabled_for_user:\n if ('twofactor_authenticated' not in session) or (not session['twofactor_authenticated']):\n # User has not done 2FA yet\n print('NEED 2FA AUTHENTICATION')\n return redirect(url_for('auth.twofactor'))\n else:\n # 2FA device registered, and user has authenticated with it\n print('2FA AUTHED')\n flash('You are the most secure person in the world!')\n return render_template('profile.html', name=current_user.name, user_is_super_secure=True)\n elif not twofactor_enabled_for_user:\n # 2FA Not enabled, tell user to enable it\n print('NO 2FA DEVICE FOR 
USER')\n flash('You should really register a Two Factor authenticator!')\n return render_template('profile.html', name=current_user.name)\n\n\n","sub_path":"project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"449358835","text":"results = [[4, 3], [4, 2], [3, 2], [1, 2], [2, 5]]\nn = 5\nranking = {}\nfor i in range(1, n+1):\n win = []\n for j in results:\n if j[0]==i:\n win.append(j[1])\n\n ranking.update({i: win})\n\n\n\nmatrix = [[None for _ in range(n)] for _ in range(n)]\nfor win, lose in results:\n matrix[win - 1][lose - 1] = True\n matrix[lose - 1][win - 1] = False\n\nfor i in range(n):\n for j in range(n):\n for k in range(n):\n if matrix[j][i] == None:\n continue\n\n if matrix[j][i] == matrix[i][k]:\n matrix[j][k] = matrix[j][i]\n matrix[k][j] = not matrix[j][i]\n\nanswer = 0\nfor i in range(n):\n if None in matrix[i][:i] + matrix[i][i + 1:]:\n continue\n answer += 1","sub_path":"programmers/ranking.py","file_name":"ranking.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"370854876","text":"import pyperclip\r\n\r\n\r\ndef main():\r\n msg = input('Enter your Text: ')\r\n myKey = 5\r\n ciphertext = encryptMessage(myKey, msg)\r\n\r\n\r\n print(\"Encrypted Test is:\",ciphertext)\r\n pyperclip.copy(ciphertext)\r\n\r\n\r\ndef encryptMessage(key, message):\r\n ciphertext = [''] * key\r\n\r\n for col in range(key):\r\n position = col\r\n while position < len(message):\r\n ciphertext[col] += message[position]\r\n position += key\r\n return ''.join(ciphertext) # Cipher text\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"BasicLibraries-master/Ciphers - Byte Conversion/Transposition Cipher_enc.py","file_name":"Transposition Cipher_enc.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"428223081","text":"import rmx_preprocessing\nimport rmx_postprocessing\nfrom REMix.src.preprocessing.prepare_lstm_data import prep_lstm_run_data\nimport torch\nimport pandas as pd\nimport numpy as np\nimport evaluation\n\n\n############# Angabeparameter ###########################################\n\nland = 'DEU'\ndatum ='2014-04-07 00:00:00'\nmodel2run= 'demand'\n\n#########################################################################\n\nmodels = ['demand', 'wind']\n\nif model2run == models[0]:\n rmx_preprocessing.input_matrix_builder_to_run_lstm( country = land,\n obs_time = [datum])\n\n\n prep_lstm_run_data(datum = datum)\n\n ##### RUN ######\n #\n # .. 
to load your previously trained model:\n model=torch.load('REMix/models/trained_model_lstm_180.pt')\n\n ## load training data\n X_run = np.load('REMix/data/transformed/lstm_Xrun_scaled.npy')\n y_run = pd.read_csv('REMix/data/transformed/lstm_yrun_scaled.csv', header=None)\n position = pd.read_csv('REMix/data/transformed/lstm_x_position_run.csv', header=None)\n\n\n x_run = torch.tensor(X_run, dtype=torch.float32)\n y_run= torch.tensor(y_run.values.reshape(-1,1), dtype=torch.float32)\n\n x_run = x_run.reshape(-1, 24, 9).transpose(0,1)\n\n model.hidden = model.init_hidden(x_run.shape[1])\n outputs = model(x_run)\n\n #y_pred = torch.max(outputs[0,:,:].data, 1)[1]\n\n y_pred =outputs[0,:,:].detach().numpy()\n y_measured = y_run.numpy()\n y_predicted = y_pred\n\n #plt.plot(range(0,len(y_pred_test)),y_pred_test)\n for n in range(0, 100):\n print(str(y_pred[n]) + \" \" + str(y_measured[n]))\n\n output_list = list()\n #y_measured = torch.from_numpy(y_train.values.reshape(-1, 1)).float()\n\n #output_list = [y_predicted,y_measured,position.values.reshape(-1,1)]\n output_list1 = pd.DataFrame(y_predicted,columns=['y_predicted'])\n output_list2 = pd.DataFrame(y_measured,columns=['y_measured'])\n output_list3 = pd.DataFrame(position.values.reshape(-1,1),columns=['position'])\n\n pd.concat([output_list1,output_list2, output_list3],axis = 1).to_csv('REMix/results/lstm/result_ypred_ymeas_run.csv')\n evaluation.print_errors(output_list1, output_list2)\n print(output_list3)\n\n #### Auswertung und Plot ####\n ######\n Input_test = pd.read_csv('REMix/results/lstm/result_ypred_ymeas_run.csv')\n #rmx_postprocessing.plot_landkarte_und_punkte(country = 'DEU')\n\n #rmx_postprocessing.plot_test_values(df=pd.DataFrame(),*,postion=None, savename ='nodes_accuracies', modeling ='test')\n rmx_postprocessing.plot_test_values(df=Input_test, modeling = 'run')\n\n#if model2run == models[1]:","sub_path":"remix_run_function.py","file_name":"remix_run_function.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"23403892","text":"#!/usr/bin/env python3\n\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import font\nimport time\nfrom collections import Counter\n\ndef quit(*args):\n\troot.destroy()\n\ndef show_time():\n\ttxt.set(time.strftime(\"%H:%M:%S\"))\n\tc.set(Counter())\n\troot.after(1000, show_time)\n\ndef update():\n\ttxt.set(Counter().values())\n\troot.after(1000, show_time)\n\tc = Counter()\n\nroot = Tk()\nroot.attributes(\"-fullscreen\", True)\nroot.configure(background='black')\nroot.bind(\"<Escape>\", quit)\nroot.bind(\"x\", quit)\nroot.after(1000, show_time)\n\nfnt = font.Font(family='Helvetica', size=128, weight='bold')\nc=StringVar()\ntxt = StringVar()\ntxt.set(time.strftime(\"%H:%M:%S\"))\n#lbl = ttk.Label(root, textvariable=txt, font=fnt, foreground=\"grey\", background=\"black\")\n#lbl.place(relx=0.5, rely=0.5, anchor=CENTER)\n\nlbl2 = ttk.Label(root, textvariable=c, font=fnt, foreground=\"grey\", background=\"black\")\nlbl2.place(relx=0.5, rely=0.5, anchor=CENTER)\n\nroot.mainloop()","sub_path":"tkinter_learn/fulscreen.py","file_name":"fulscreen.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"236325614","text":"# coding=utf-8\n\"\"\"\n\nPROBLEM 041 - Pandigital Prime\n\nWritten by: Yuanjie Li\nDate: June 26, 2018\n\nWe shall say that an n-digit number is pandigital if it makes 
use of all the\ndigits 1 to n exactly once. For example, 2143 is a 4-digit pandigital and is\nalso prime.\n\nWhat is the largest n-digit pandigital prime that exists?\n\n\"\"\"\nimport sys\nsys.path.append('../utils')\nimport utils\nimport math\n\ndef main():\n best = 0\n pandig = ['1','2','3','4','5','6','7','8','9']\n\n print(\"Generating primes array ... \")\n primeArr = utils.getPrimeArr(999)\n print(\"Done.\")\n for i in range(4,10):\n print(\"Updating prime array to %s...\" % int(math.pow(10,i)))\n primeArr = utils.updatePrimeArr(int(math.pow(10,i)), primeArr)\n print(\"Updated.\")\n\n # Subset and get all permutations of these.\n pan_check = [int(x) for x in pandig[:i]]\n all_check = utils.permute(pan_check)\n\n for permute in all_check:\n number = \"\"\n for digit in permute:\n number += str(digit)\n\n number = int(number)\n if number in primeArr and number > best:\n best = number\n print(\"The current best is: %i\" % best)\n\n print('The best prime is: %i' % best)\n print(best)\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"p041/pandig_prime.py","file_name":"pandig_prime.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"263031872","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom app.views import *\n\nurlpatterns = [\n url(r'^home/$', relatorio_home, name='home'),\n url(r'^$', relatorio_home, name='home'),\n url(r'^cadastro_usuario/$', cadastro_usuario, name='cadastro_usuario'),\n url(r'^cadastro_viatura/$', cadastro_viatura, name='cadastro_viatura'),\n url(r'^cadastro_unidade/$', cadastro_unidade, name='cadastro_unidade'),\n url(r'^cadastro_pessoa/$', cadastro_pessoa, name='cadastro_pessoa'),\n url(r'^cadastro_equipe/$', cadastro_equipe, name='cadastro_equipe'),\n url(r'^cadastro_efetivo_disponivel/$', cadastro_efetivo_disponivel, name='cadastro_efetivo_disponivel'),\n url(r'^lista_viatura/$', viatura_list, name='lista_viatura'),\n url(r'^lista_unidade/$', unidade_list, name='lista_unidade'),\n url(r'^lista_pessoa/$', pessoa_list, name='lista_pessoa'),\n url(r'^lista_equipe/$', lista_equipe, name='lista_equipe'),\n url(r'^lista_efetivo_disponivel/$', lista_efetivo_disponivel, name='lista_efetivo_disponivel'),\n url(r'^viatura/(?P\\d+)/visualizar$', visualizar_viatura, name='visualizar_viatura'),\n url(r'^pessoa/(?P\\d+)/visualizar$', visualizar_pessoa, name='visualizar_pessoa'),\n url(r'^unidade/(?P\\d+)/visualizar$', visualizar_unidade, name='visualizar_unidade'),\n url(r'^equipe/(?P\\d+)/visualizar$', visualizar_equipe, name='visualizar_equipe'),\n url(r'^efetivo_disponivel/(?P\\d+)/visualizar$', visualizar_efetivo_disponivel, name='visualizar_efetivo_disponivel'),\n url(r'^unidade/(?P\\d+)/editar$', editar_unidade, name='editar_unidade'),\n url(r'^viatura/(?P\\d+)/editar$', editar_viatura, name='editar_viatura'),\n url(r'^usuario/(?P[\\d]+)/$', editar_usuario, name='editar_usuario'),\n url(r'^pessoa/(?P\\d+)/editar$', editar_pessoa, name='editar_pessoa'),\n url(r'^equipe/(?P\\d+)/editar$', editar_equipe, name='editar_equipe'),\n url(r'^equipe/(?P\\d+)/desalocar$', desalocar_equipe, name='desalocar_equipe'),\n url(r'^efetivo_disponivel/(?P\\d+)/editar$', editar_efetivo_disponivel, name='editar_efetivo_disponivel'),\n \n]\n","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} 
+{"seq_id":"50550141","text":"from selenium import webdriver\n\ndriver=webdriver.Chrome(executable_path=\"C:/ChromeDriver/chromedriver_win32/chromedriver\")\ndriver.maximize_window()\ndriver.get(\"http://demo.automationtesting.in/Windows.html\")\n\ndriver.find_element_by_xpath(\"//*[@id='Tabbed']/a/button\").click()\n\nprint(driver.current_window_handle)\n\nhandles = driver.window_handles\nfor h in handles:\n driver.switch_to.window(h)\n print(driver.title)\n if driver.title == \"SeleniumHQ Browser Automation\": # To close specific window\n driver.close()\n\n\n# driver.quit() # Close all windows\n# driver.close # close Parent window\n","sub_path":"HandlingBrowser.py","file_name":"HandlingBrowser.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"537006834","text":"import time, sys, cherrypy, os\n\nfrom webapp import create_app\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.mllib.tree import DecisionTree, DecisionTreeModel\nimport logging\n \ndef init_spark_context():\n\n global predictionModel\n\n # load spark context\n conf = SparkConf().setAppName(\"movie_recommendation-server\")\n\n # IMPORTANT: pass aditional Python modules to each worker\n sc = SparkContext(conf=conf, pyFiles=['webapp.py', 'service_func.py'])\n\n # absolute path in hdfs\n # to run locally, remove first slash '/' i.e my_model1, not /my_model1\n\n predictionModel = DecisionTreeModel.load(sc, '/my_model1')\n sc.addFile( 'conv/6.p')\n sc.addFile( 'conv/7.p')\n sc.addFile( 'conv/8.p')\n sc.addFile('conv/10.p')\n sc.addFile('conv/12.p')\n sc.addFile( 'conv/36.p')\n\n return sc\n \n \ndef run_server(app):\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n # Mount the WSGI callable object (app) on the root directory\n cherrypy.tree.graft(app, '/')\n \n # Set the configuration of the web server\n cherrypy.config.update({\n 'engine.autoreload.on': True,\n 'log.screen': True,\n 'server.socket_port': 80,\n 'server.socket_host': '0.0.0.0'\n })\n \n # Start the CherryPy WSGI web server\n cherrypy.engine.start()\n cherrypy.engine.block()\n \n \nif __name__ == \"__main__\":\n # Init spark context and load libraries\n sc = init_spark_context()\n app = create_app(sc, predictionModel)\n \n # start web server\n run_server(app)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"56129123","text":"import pymongo\n\n\n#gets you the handler on the mongo client\nclient = pymongo.MongoClient()\n#choose the data base\ndb = client.Surveys\n#choose the collection\ncollection = db.usersurveystemp\n#example code\ndef InsertDummyRecords():\n\tcollection.insert({\"driverID\" : \"JohnD@example.com\", \"start_long\" : \"33.2991\"})\n\ndef InsertSurveyResponse(surveyResponse):\n\tcollection.insert(surveyResponse)\n\n## Returns the average scores before iolab, average scores after iolab and number of total responses\ndef GetAggregatedResults():\n\tfocus_values = collection.distinct(\"focus\")\n\tpipeline1 = [\n\t\t{\n\t\t\t\"$group\": {\n\t\t\t\t\"_id\": \"null\", \n\t\t\t\t\"fe-before-avg\": {\"$avg\": \"$fe-before\"}, \n\t\t\t\t\"fe-after-avg\": {\"$avg\": \"$fe-after\"},\n\t\t\t\t\"count\": {\"$sum\": 1}\n\t\t\t}\n\t\t}\n\t]\n\tresult = collection.aggregate(pipeline1).next()\n\n\tpipeline2 = [\n\t\t{\n\t\t\t\"$group\": {\n\t\t\t\t\"_id\": \"$focus\", ## Group by focus 
value and not focus key\n\t\t\t\t\"count\": {\"$sum\": 1}\n\t\t\t}\n\t\t}\n\t]\n\tfor doc in collection.aggregate(pipeline2):\n\t\tresult[doc[\"_id\"]] = doc[\"count\"]\n\treturn result\n\n\n\nif __name__ == \"__main__\":\n\tInsertDummyRecords()","sub_path":"database/lab_mongo/app/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"377886240","text":"#-*-coding:utf-8-*-\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom bs4 import BeautifulSoup\r\nimport unittest\r\nfrom time import sleep\r\n\r\nclass testDouyu(unittest.TestCase):\r\n def setUp(self):\r\n options = Options()\r\n #谷歌无头浏览器设置,不用弹出浏览器\r\n options.add_argument('--headless')\r\n options.add_argument('--disable-gpu')\r\n self.driver = webdriver.Chrome(options=options)\r\n #self.driver = webdriver.PhantomJS()\r\n def testCase(self):\r\n\r\n #截取全屏\r\n self.driver.save_screenshot(\"1.png\")\r\n self.driver.get(\"https://www.douyu.com/directory/all\")\r\n while True:\r\n\r\n soup = BeautifulSoup(self.driver.page_source, 'lxml')\r\n roomTypeList = soup.find_all(\"span\",{\"class\":\"tag ellipsis\"})\r\n list1 = []\r\n list2 = []\r\n list3 = []\r\n for roomTypeName in roomTypeList:\r\n list1.append(roomTypeName.get_text())\r\n renqiNameList = soup.find_all(\"span\", {\"class\": \"dy-num fr\"})\r\n for renqi in renqiNameList:\r\n list2.append(renqi.get_text())\r\n roomNameList = soup.find_all(\"h3\", {\"class\": \"ellipsis\"})\r\n for roomName in roomNameList:\r\n list3.append(roomName.get_text())\r\n #获取三个值\r\n for i in range(len(list2)):\r\n print(list3[i].strip()+\"\\t\\t\\t\\t\"+list1[i].strip()+\"\\t\"+list2[i])\r\n try:\r\n if self.driver.page_source.find(\"shark-pager-next shark-pager-disable shark-pager-disable-next\") != -1:\r\n break\r\n elif self.isElementExist() == True:\r\n self.driver.find_element_by_class_name(\"shark-pager-next\").click()\r\n sleep(3)\r\n except Exception as reason:\r\n print(reason)\r\n\r\n\r\n # 该方法用来确认元素是否存在,如果存在返回flag=true,否则返回false\r\n\r\n def isElementExist(self):\r\n flag = True\r\n browser = self.driver\r\n try:\r\n browser.find_element_by_class_name(\"shark-pager-next\")\r\n return flag\r\n\r\n except:\r\n flag = False\r\n return flag\r\n\r\n def tearDown(self):\r\n self.driver.quit()\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()","sub_path":"douyuTest.py","file_name":"douyuTest.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"32032581","text":"# -*- coding: utf-8 -*-\r\nimport string\r\n\r\nclass FormatearNumeros():\r\n @staticmethod\r\n def toNumTelefonico(cad):\r\n cadInv = cad[::-1] # invierte la cadena\r\n\r\n t = 0\r\n nuevaCadena = ''\r\n for i in cadInv:\r\n nuevaCadena += i\r\n t += 1\r\n if t in [2, 4, 7, 10]:\r\n nuevaCadena += '.'\r\n\r\n if nuevaCadena[-1] == '.':\r\n nuevaCadena = nuevaCadena[:-1]\r\n return nuevaCadena[::-1]\r\n\r\n @staticmethod\r\n def toNumMilesSigno(cad, signo=''):\r\n cad = str(cad)\r\n cadInv = cad[::-1] # invierte la cadena\r\n t = 0\r\n nuevaCadena = ''\r\n for i in cadInv:\r\n nuevaCadena += i\r\n t += 1\r\n if t in [3, 6, 9, 12, 15]:\r\n nuevaCadena += '.'\r\n if nuevaCadena[-1] == '.':\r\n nuevaCadena = nuevaCadena[:-1]\r\n if signo == '':\r\n return nuevaCadena[::-1]\r\n else:\r\n return signo + ' ' + nuevaCadena[::-1]\r\n\r\n @staticmethod\r\n def 
toNumSinPuntos(cad):\r\n nuevaCadena = ''\r\n for i in cad:\r\n if i != '.':\r\n nuevaCadena += i\r\n return int(nuevaCadena)\r\n\r\n @staticmethod\r\n def extraerCant_idProducto(cad):\r\n lista = []\r\n sw = 0\r\n num1 = ''\r\n num2 = ''\r\n i = 0\r\n for i in range(len(cad)):\r\n if cad[i] in string.digits:\r\n num1 += cad[i]\r\n sw = 1\r\n else:\r\n if sw == 1:\r\n break\r\n cad2 = cad[i + 1:]\r\n sw = 0\r\n for j in range(len(cad2)):\r\n if cad2[j] in string.digits:\r\n num2 += cad2[j]\r\n sw = 1\r\n else:\r\n if sw == 1:\r\n break\r\n if num1 != '':\r\n lista.append(num1)\r\n if num2 != '':\r\n lista.append(num2)\r\n\r\n return lista","sub_path":"pyeay/formato.py","file_name":"formato.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"441500071","text":"#!/usr/bin/python\n# encoding: utf-8\nimport sys\n\nfrom edAction import query_all_release_plan_list\nfrom utils import wf, get_args, from_time_stamp\n\n\ndef key_for_record(test_apply_item):\n return test_apply_item['name']\n\n\ndef main(workflow):\n query, mis, cache_seconds = get_args()\n online_plan_list = query_all_release_plan_list()\n online_plan_list = wf().filter(query, online_plan_list, key_for_record, min_score=20)\n if online_plan_list:\n for online_plan in online_plan_list:\n wf().add_item(online_plan['name'],\n u'预计上线时间:{}'.format(from_time_stamp(online_plan['projectOnlineTime'] / 1000)),\n online_plan['id'], valid=True)\n else:\n wf().add_item('no result', valid=False)\n wf().send_feedback()\n\n\nif __name__ == '__main__':\n sys.exit(wf().run(main))\n # delete_cookies(LOG_CENTER_COOKIE_NAME)\n","sub_path":"alfredConfig/Alfred.alfredpreferences/workflows/user.workflow.B2F13413-0F41-400D-A18A-0EAADA34E2DD/edOnlionPlanList.py","file_name":"edOnlionPlanList.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"289520035","text":"import pandas as pd\n\ndef f_clean(filename):\n\n\n file = filename + '.csv'\n\n df = pd.read_csv(file)\n\n\n trial= df.copy()\n\n #subsetting\n trial = df[['type','likes/views','age','hashtags','mentions']]\n\n\n #filling NA\n trial[\"hashtags\"].fillna(\"gak_ada\", inplace = True) \n trial[\"mentions\"].fillna(\"gak_ada\", inplace = True) \n\n\n\n\n #
Feature Engineering
\n # \n # 1. Categorical Likes / Views\n # 2. Hashtags\n # 3. Mentions\n # 4. Age\n # \n\n\n #### categorical for likes/views ####\n\n\n\n #video \n temp = []\n\n for i in trial['type']:\n if i == 'video':\n temp.append(1)\n \n else:\n temp.append(0)\n \n trial['type_video'] = temp \n\n\n temp = []\n #photo\n for i in trial['type']:\n if i == 'photo':\n temp.append(1)\n \n else:\n temp.append(0)\n\n trial['type_photo'] = temp \n\n\n\n #### Hashtags ####\n\n #temporary values\n placeholder = trial['hashtags']\n\n\n #counter for number of hashtags\n\n temp = []\n for i in placeholder:\n if i == 'gak_ada':\n counter = 0\n temp.append(counter)\n else:\n counter = i.count(\",\") + 1\n temp.append(counter)\n \n \n #append to dataframe\n trial['num_hashtags'] = temp\n\n\n\n #### Mentions ####\n\n #temporary values\n placeholder = trial['mentions']\n\n\n #counter for number of hashtags\n\n temp = []\n for i in placeholder:\n if i == 'gak_ada':\n counter = 0\n temp.append(counter)\n else:\n counter = i.count(\",\") + 1\n temp.append(counter)\n \n \n #append to dataframe\n trial['num_mentions'] = temp\n\n # append m\n time = trial['age'].str.contains('m')\n\n placeholder = []\n\n \n\n for i in time:\n if i == True:\n placeholder.append('m')\n else:\n continue\n\n\n\n # append h \n time = trial['age'].str.contains('h')\n\n \n\n for i in time:\n if i == True:\n placeholder.append('h')\n else:\n continue\n\n time = trial['age'].str.contains('d')\n\n # append d \n\n for i in time:\n if i == True:\n placeholder.append('d')\n else:\n continue\n \n # append w\n\n time = trial['age'].str.contains('w')\n\n for i in time:\n if i == True:\n placeholder.append('w')\n else:\n continue\n\n\n #slicing the age feature\n\n\n\n trial['age_num'] = trial['age'].replace(to_replace = ['m','d','w','h'], value = '', regex=True)\n #trial['age_num'] = trial['age'].replace(to_replace = 'w', value = '', regex=True)\n #trial['age_num'] = trial['age'].replace(to_replace = 'h', value = '', regex=True)\n\n\n # multiplier\n\n\n haha = []\n\n\n for i in placeholder:\n if i == 'm':\n haha.append(1/60)\n elif i == 'h':\n haha.append(1)\n elif i == 'd':\n haha.append(24)\n else:\n haha.append(168)\n\n\n\n trial['multiplier'] = haha\n\n trial['age_num'] = pd.to_numeric(trial['age_num'], errors='coerce')\n\n trial['age_hours'] = trial['multiplier'] * trial['age_num']\n\n\n #
Analysis
\n\n\n\n #final df for metric analytics\n\n final_df = trial[['likes/views', 'age_hours', 'num_hashtags', 'num_mentions','type_photo','type_video']]\n\n final_df.head()\n\n\n\n\n\n final_df['likes/views'] = final_df['likes/views'].replace(to_replace = ',', value = '', regex=True)\n\n final_df['likes/views'] = pd.to_numeric(final_df['likes/views'])\n\n final_df.dtypes\n\n\n\n\n\n photo = final_df[final_df['type_photo'] == 1]\n\n photo = photo.rename(columns={'likes/views':'likes'})\n\n photo = photo.drop('type_video', axis=1)\n\n photocorr = photo.corr()\n\n\n \n\n\n video = final_df[final_df['type_video'] == 1]\n\n video = video.rename(columns={'likes/views':'views'})\n\n video = video.drop('type_photo', axis=1)\n\n videocorr = video.corr()\n\n\n\n\n ####### saving to excel #######\n\n df['age_hours'] = final_df['age_hours']\n df['num_hashtags'] = final_df['num_hashtags']\n df['num_mentions'] = final_df['num_mentions']\n df['type_photo'] = final_df['type_photo']\n df['type_video'] = final_df['type_video']\n\n\n with pd.ExcelWriter('clean_'+filename+'.xlsx') as writer: \n\n df.to_excel(writer, sheet_name = 'full')\n photo.to_excel(writer, sheet_name = 'photo')\n video.to_excel(writer, sheet_name = 'video')\n photocorr.to_excel(writer, sheet_name = 'photo corr')\n videocorr.to_excel(writer, sheet_name = 'video corr')\n\n","sub_path":"igcleaner2.py","file_name":"igcleaner2.py","file_ext":"py","file_size_in_byte":4621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"95895645","text":"stones = list(map(int, input().split(\",\")))\nstones.sort()\nn = len(stones)\nmin_moves = n\nmax_moves = max(stones[-1] - stones[1] - n + 2, stones[-2] - stones[0] - n + 2)\ni = 0\nfor j in range(n):\n while stones[j] - stones[i] + 1 > n:\n i += 1\n cur_stones = j - i + 1\n if cur_stones == n-1 and stones[j] - stones[i] + 1 == n - 1:\n min_moves = min(2, min_moves)\n else:\n min_moves = min(n - cur_stones, min_moves)\nans = [min_moves, max_moves]\nprint(ans)\n","sub_path":"Code/CodeRecords/2642/60694/283172.py","file_name":"283172.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"305212111","text":"# -*- coding: utf-8 -*-\n\"\"\"\n# @Time : 2020/5/19 上午11:15\n# @Author : HOY\n# @Email : huangouyan@changingedu.com\n# @File : teacher.py\n# @Software: PyCharm\n\"\"\"\nimport re\nimport time\nimport torch\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics\nfrom utils import get_time_dif\nimport torch.nn.functional as F\nfrom importlib import import_module\nfrom sklearn.model_selection import train_test_split\nfrom models.bert import *\nfrom torch.utils.data import DataLoader, TensorDataset\n\nPAD, CLS = '[PAD]', '[CLS]'\n\n\n# 预测教师模型输出结果\ndef teacher_predict(x,y):\n\n config = Config('data')\n\n test_data = load_embed(x, y)\n\n model = Model(config).to(config.device)\n model.load_state_dict(torch.load(config.save_path))\n total_params = sum(p.numel() for p in model.parameters())\n print(f'{total_params:,} total parameters.')\n model.eval()\n\n predict_all = np.array([], dtype=int)\n p = []\n\n with torch.no_grad():\n for a,b,c, labels in test_data:\n outputs = model([a,b,c])\n predic = torch.max(outputs, 1)[1].cpu().numpy()\n predict_all = np.append(predict_all, predic)\n p.append(outputs)\n # return p\n return predict_all, p\n\n\ndef load_embed(x, y, pad_size=32):\n contents_token_ids = []\n contents_seq_len = []\n contents_mask = []\n 
contents_labels = []\n\n re_data = []\n for i in x:\n i_re = ''.join(re.findall(r'[A-Za-z0-9\\u4e00-\\u9fa5]', i))\n re_data.append(i_re.strip())\n tokenizer = BertTokenizer.from_pretrained('./bert_pretrain')\n for content,label in zip(re_data,y):\n # content = ''.join(re.findall(r'[A-Za-z0-9\\u4e00-\\u9fa5]', x)).strip()\n\n token = tokenizer.tokenize(content)\n token = [CLS] + token\n seq_len = len(token)\n mask = []\n token_ids = tokenizer.convert_tokens_to_ids(token)\n if pad_size:\n if len(token) < pad_size:\n mask = [1] * len(token_ids) + [0] * (pad_size - len(token))\n token_ids += ([0] * (pad_size - len(token)))\n else:\n mask = [1] * pad_size\n token_ids = token_ids[:pad_size]\n seq_len = pad_size\n contents_labels.append(int(label))\n contents_token_ids.append(token_ids)\n contents_seq_len.append(seq_len)\n contents_mask.append(mask)\n\n contents_labels = torch.LongTensor(contents_labels)\n contents_token_ids = torch.LongTensor(contents_token_ids)\n contents_seq_len = torch.LongTensor(contents_seq_len)\n contents_mask = torch.LongTensor(contents_mask)\n\n train_loader = DataLoader(TensorDataset(contents_token_ids,contents_seq_len,contents_mask,contents_labels), batch_size=64)\n\n return train_loader\n\n\ndef teacher_train(config, model, train_iter, dev_iter):\n start_time = time.time()\n model.train()\n optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)\n # model.load_state_dict(torch.load('data/saved_dict/xlnet.ckpt'))\n total_batch = 0 # 记录进行到多少batch\n dev_best_loss = float('inf')\n last_improve = 0 # 记录上次验证集loss下降的batch数\n flag = False # 记录是否很久没有效果提升\n model.train()\n for epoch in range(config.num_epochs):\n print('Epoch [{}/{}]'.format(epoch + 1, config.num_epochs))\n for i, (a,b,c, labels) in enumerate(train_iter):\n # print(total_batch)\n outputs = model([a,b,c])\n model.zero_grad()\n loss = F.cross_entropy(outputs, labels)\n loss.backward()\n optimizer.step()\n # for name, w in model.named_parameters():\n # if w.requires_grad:\n # print(name)\n if total_batch % 10 == 0:\n # 每多少轮输出在训练集和验证集上的效果\n true = labels.data.cpu()\n predic = torch.max(outputs.data, 1)[1].cpu()\n train_acc = metrics.accuracy_score(true, predic)\n dev_acc, dev_loss = teacher_evaluate(config, model, dev_iter)\n if dev_loss < dev_best_loss:\n dev_best_loss = dev_loss\n torch.save(model.state_dict(), config.save_path)\n improve = '*'\n last_improve = total_batch\n else:\n improve = ''\n time_dif = get_time_dif(start_time)\n msg = 'Iter: {0:>6}, Train Loss: {1:>5.2}, Train Acc: {2:>6.2%}, Val Loss: {3:>5.2}, Val Acc: {4:>6.2%}, Time: {5} {6}'\n print(msg.format(total_batch, loss.item(), train_acc, dev_loss, dev_acc, time_dif, improve))\n model.train()\n total_batch += 1\n if total_batch - last_improve > config.require_improvement:\n # 验证集loss超过1000batch没下降,结束训练\n print(\"No optimization for a long time, auto-stopping...\")\n flag = True\n break\n if flag:\n break\n teacher_test(config, model, dev_iter)\n\n\ndef teacher_test(config, model, test_iter):\n # test\n model.load_state_dict(torch.load(config.save_path))\n model.eval()\n start_time = time.time()\n test_acc, test_loss, test_report, test_confusion = teacher_evaluate(config, model, test_iter, test=True)\n msg = 'Test Loss: {0:>5.2}, Test Acc: {1:>6.2%}'\n print(msg.format(test_loss, test_acc))\n print(\"Precision, Recall and F1-Score...\")\n print(test_report)\n print(\"Confusion Matrix...\")\n print(test_confusion)\n time_dif = get_time_dif(start_time)\n print(\"Time usage:\", time_dif)\n\n\ndef teacher_evaluate(config, model, 
data_iter, test=False):\n model.eval()\n loss_total = 0\n predict_all = np.array([], dtype=int)\n labels_all = np.array([], dtype=int)\n with torch.no_grad():\n for a,b,c, labels in data_iter:\n # print(texts)\n outputs = model([a,b,c])\n loss = F.cross_entropy(outputs, labels)\n loss_total += loss\n labels = labels.data.cpu().numpy()\n predic = torch.max(outputs.data, 1)[1].cpu().numpy()\n labels_all = np.append(labels_all, labels)\n predict_all = np.append(predict_all, predic)\n acc = metrics.accuracy_score(labels_all, predict_all)\n if test:\n # data = pd.DataFrame(columns=('label','pred'))\n # data['label'] = labels_all\n # data['pred'] = predict_all\n # data.to_csv('pred.csv', encoding=\"utf_8_sig\")\n report = metrics.classification_report(labels_all, predict_all, target_names=config.class_list, digits=4)\n confusion = metrics.confusion_matrix(labels_all, predict_all)\n return acc, loss_total / len(data_iter), report, confusion\n return acc, loss_total / len(data_iter)\n","sub_path":"teacher.py","file_name":"teacher.py","file_ext":"py","file_size_in_byte":6832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"339771009","text":"from sciencelogic.performance_data import PerformanceData\n\n\nclass Device(object):\n \"\"\"\n Represents a monitored device\n \"\"\"\n\n def __init__(self, device, uri, client,\n has_details=False, fetch_details=False):\n \"\"\"\n Instantiate a new Device object\n\n :param device: A dict from the /api/device request\n :type device: ``dict``\n\n :param client: The API client\n :type client: :class:`Client`\n\n \"\"\"\n self._client = client\n self.uri = uri\n\n if not isinstance(device, dict):\n raise TypeError(\"Device is not a valid dict\")\n\n if has_details:\n self.description = device['name']\n else:\n self.description = device['description']\n if not has_details and fetch_details:\n self._fill_details()\n else:\n self.details = device\n\n def __repr__(self):\n return self.description\n\n def _fill_details(self):\n \"\"\"\n Get the detailed information about the device\n \"\"\"\n device = self._client.get(self.uri)\n self.details = device.json()\n\n def performance_counters(self):\n \"\"\"\n Get a list of performance counters for this device\n\n :rtype: ``list`` of :class:`PerformanceData`\n \"\"\"\n if self.details is None:\n self._fill_details()\n counters = []\n uri = self.details['performance_data']['URI']\n for u_data in self._client.get(uri).json()['result_set']:\n counters.append(PerformanceData(self._client, u_data))\n return counters\n","sub_path":"sciencelogic/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"500784942","text":"import torch \nimport torch.nn as nn\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nimport numpy as np\n\nnum_epochs = 40\nbatch_size = 200\nlearning_rate = 0.001\ncurrent = 'logos'\n\ntrain_dataset = dsets.ImageFolder(root='./workspace/'+current+'/train/', \n transform=transforms.ToTensor())\ntest_dataset = dsets.ImageFolder(root='./workspace/'+current+'/test/', \n transform=transforms.ToTensor())\n\n\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size, \n shuffle=True)\n\n\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=batch_size, \n shuffle=False)\n\n#COVNET Architecture\n#This example contains 
the following architecture:- Conv->ReLU->Pool->Conv->ReLu->Pool->FC\nclass CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(3, 16, kernel_size=5, padding=2),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(16, 32, kernel_size=5, padding=2),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.MaxPool2d(2))\n self.fc1 = nn.Linear(7*7*32, 100)\n self.fc2 = nn.Linear(100, 10)\n \n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out) \n out = out.view(out.size(0), -1)\n out = self.fc1(out)\n out = self.fc2(out)\n return out\n \ncnn = CNN()\n\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)\n\n\nfor epoch in range(num_epochs):\n for i, (images, labels) in enumerate(train_loader):\n images = Variable(images)\n labels = Variable(labels)\n \n \n optimizer.zero_grad()\n outputs = cnn(images)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n \n if (100) % 100 == 0:\n print ('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f' \n %(epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, loss.data[0]))\n\n\n\n\ncnn.eval() \ncorrect = 0\ntotal = 0\nfor images, labels in test_loader:\n images = Variable(images)\n outputs = cnn(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum()\n\nprint('Test Accuracy of the model on the test images: %d %%' % (100 * correct / total))\n\ncnn.eval() \ncorrect = 0\ntotal = 0\nfor images, labels in train_loader:\n images = Variable(images)\n outputs = cnn(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum()\n\nprint('Test Accuracy of the model on the training images: %d %%' % (100 * correct / total))\n\n\n#torch.save(cnn.state_dict(), './models/'+current+'/cnn.pkl')\n","sub_path":"CNN-Image Classification/pytorch_cnn_train.py","file_name":"pytorch_cnn_train.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"232334495","text":"import numpy as np\nfrom scipy import stats\nfrom scipy.optimize import fsolve\nimport cv2 as cv\nimport math\nimport sys\nfrom array import array\nimport runMotor\nimport steering\n\nfrom gpiozero import LED\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setup(18, GPIO.OUT)\nGPIO.setup(17, GPIO.OUT)\n\nGPIO.output(18, GPIO.HIGH)\nGPIO.output(17, GPIO.HIGH)\n\n#left_led = LED(18)\n#right_led = LED(17)\n\n# Import Adafruit Motor HAT Library\nfrom Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor\n# Import additional libraries that support MotorHAT\nimport time\nimport atexit\n\n# create a default MotorHAT object, no changes to I2C address or frequency\nmh = Adafruit_MotorHAT(addr=0x60)\nlmotor = mh.getMotor(1)\nrmotor = mh.getMotor(2)\n\nleft_speed = 28000\nright_speed = 29500\n\n\ndef turnOffMotors():\n mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n\natexit.register(turnOffMotors)\n\n\ndef region_of_interest(img, vertices):\n mask = np.zeros_like(img)\n match_mask_color = 255\n cv.fillPoly(mask, vertices, match_mask_color)\n masked_image = cv.bitwise_and(img, mask)\n return masked_image\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=3):\n line_img = np.zeros(\n (\n img.shape[0],\n img.shape[1],\n 3\n ),\n dtype=np.uint8\n )\n img = np.copy(img)\n if lines is 
None:\n return\n for line in lines:\n for x1, y1, x2, y2 in line:\n cv.line(line_img, (x1, y1), (x2, y2), color, thickness)\n img = cv.addWeighted(img, 0.8, line_img, 1.0, 0.0)\n return img\n\n\ndef perp( a ) :\n b = np.empty_like(a)\n b[0] = -a[1]\n b[1] = a[0]\n return b\n\n# line segment a given by endpoints a1, a2\n# line segment b given by endpoints b1, b2\n# return\n\ndef auto_canny(image, sigma=0.33):\n\t# compute the median of the single channel pixel intensities\n\tv = np.median(image)\n\n\t# apply automatic Canny edge detection using the computed median\n\tlower = int(max(0, (1.0 - sigma) * v))\n\tupper = int(min(255, (1.0 + sigma) * v))\n\tedged = cv.Canny(image, lower, upper)\n\n\t# return the edged image\n\treturn edged\n\n\ndef seg_intersect(a1,a2, b1,b2) :\n da = a2-a1\n db = b2-b1\n dp = a1-b1\n dap = perp(da)\n denom = np.dot( dap, db)\n num = np.dot( dap, dp )\n return (num / denom.astype(float))*db + b1\n\nleft_lines_x_buffer = []\nleft_lines_y_buffer = []\nright_lines_x_buffer = []\nright_lines_y_buffer = []\n\ndef pipeline(image):\n\n gray_image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n #gray_image = cv.resize(gray_image, (320, 240))\n\n blur_image = cv.GaussianBlur(gray_image, (3, 3), 0)\n\n cv.imwrite('blur.png', blur_image)\n\n cannyed_image = auto_canny(blur_image)\n cv.imwrite('canny.png', cannyed_image)\n\n width = cannyed_image.shape[1] - 40\n height = cannyed_image.shape[0] - 40\n region_of_interest_vertices = [\n (0, height),\n (0, height - 20),\n (width / 4, 200),\n (width * 3 / 4, 200),\n (width, height - 20),\n (width, height)\n ]\n\n cropped_image = region_of_interest(cannyed_image, np.array([region_of_interest_vertices], np.int32))\n cv.imwrite('croppedimage.png', cropped_image)\n\n lines = cv.HoughLinesP(\n cropped_image,\n rho=6,\n theta=np.pi / 60,\n threshold=100,\n lines=np.array([]),\n minLineLength=60,\n maxLineGap=40\n )\n\n lined_image = draw_lines(image, lines)\n all_line_out.write(lined_image)\n\n\n left_line_x = []\n left_line_y = []\n right_line_x = []\n right_line_y = []\n\n for line in lines:\n for x1, y1, x2, y2 in line:\n try:\n slope = float((y2 - y1)) / float((x2 - x1)) # <-- Calculating the slope\n except:\n continue\n if math.fabs(slope) < 0.5: # <-- Only consider extreme slope\n continue\n if slope <= 0: # <-- If the slope is negative, left group.\n left_line_x.extend([x1, x2])\n left_line_y.extend([y1, y2])\n else: # <-- Otherwise, right group\n right_line_x.extend([x1, x2])\n right_line_y.extend([y1, y2])\n\n\n\n if len(left_lines_x_buffer) > 1:\n left_lines_x_buffer.pop(0)\n left_lines_y_buffer.pop(0)\n right_lines_x_buffer.pop(0)\n right_lines_y_buffer.pop(0)\n\n left_lines_x_buffer.append(left_line_x)\n left_lines_y_buffer.append(left_line_y)\n right_lines_x_buffer.append(right_line_x)\n right_lines_y_buffer.append(right_line_y)\n\n left_line_x = []\n left_line_y = []\n right_line_x = []\n right_line_y = []\n\n for set in left_lines_x_buffer:\n for value in set:\n left_line_x.append(value)\n\n for set in left_lines_y_buffer:\n for value in set:\n left_line_y.append(value)\n\n for set in right_lines_x_buffer:\n for value in set:\n right_line_x.append(value)\n\n for set in right_lines_y_buffer:\n for value in set:\n right_line_y.append(value)\n\n min_y = 0\n max_y = image.shape[1] # <-- The bottom of the image\n\n left_slope, left_intercept, r_value, p_value, std_err = stats.linregress(left_line_x,left_line_y)\n\n poly_left = np.poly1d([left_slope, left_intercept])\n\n left_x_start = int(poly_left(max_y))\n\n left_x_end = 
int(poly_left(min_y))\n\n\n right_slope, right_intercept, r_value, p_value, std_err = stats.linregress(right_line_x,right_line_y)\n\n poly_right = np.poly1d([right_slope, right_intercept])\n\n\n\n\n\n\n right_x_start = int(poly_right(max_y))\n\n right_x_end = int(poly_right(min_y))\n\n\n\n intersect_point = seg_intersect(np.array([0, int(poly_left(0))]), np.array([image.shape[1], int(poly_left(image.shape[1]))]), np.array([0, int(poly_right(0))]), np.array([image.shape[1], int(poly_right(image.shape[1]))]))\n\n base_midpoint = (((poly_right(image.shape[0]) - poly_left(image.shape[0])) / 2) + poly_left(image.shape[0]), image.shape[0])\n\n combined_line_image = draw_lines(\n image,\n [[\n [left_x_start, max_y, left_x_end, min_y],\n [right_x_start, max_y, right_x_end, min_y],\n ]],\n thickness=5,\n )\n\n left_line_image = draw_lines(image, [[\n [right_x_start, max_y, right_x_end, min_y]\n ]],\n thickness=5\n )\n\n\n left_base_point = (int((1 / left_slope) * (image.shape[0] - left_intercept)), image.shape[0])\n right_base_point = (int((1 / right_slope) * (image.shape[0] - right_intercept)), image.shape[0])\n\n base_midpoint = ((right_base_point[0] - left_base_point[0]) / 2 + left_base_point[0], image.shape[0])\n\n midline_slope = (base_midpoint[1] - intersect_point[1]) / (base_midpoint[0] - intersect_point[0])\n\n for x in range(0, image.shape[1]):\n cv.circle(left_line_image, (x, int(poly_right(x))), 1, (0, 0, 255))\n\n combined_line_image = draw_lines(image,\n [[\n [0, int(poly_left(0)), image.shape[1], int(poly_left(image.shape[1]))],\n [0, int(poly_right(0)), image.shape[1], int(poly_right(image.shape[1]))],\n [int(intersect_point[0]), int(intersect_point[1]), int(base_midpoint[0]), int(base_midpoint[1])]\n ]])\n\n cv.circle(combined_line_image, (int(intersect_point[0]), int(intersect_point[1])), 10, (0, 0, 255))\n cv.circle(combined_line_image, (int(base_midpoint[0]), int(base_midpoint[1])), 10, (0, 0, 255))\n cv.circle(combined_line_image, left_base_point, 10, (0, 0, 255))\n cv.circle(combined_line_image, right_base_point, 10, (0, 0, 255))\n\n return(combined_line_image, intersect_point)\n\ncap = cv.VideoCapture(0)\n\nfourcc = cv.cv.CV_FOURCC(*'MJPG')\nfinal_line_out = cv.VideoWriter('final_line_out.mjpg',fourcc, 20.0, (640,480))\n\nall_line_out = cv.VideoWriter('all_line_out.mjpg', fourcc, 20.0, (640,480))\n\nlines_buffer = []\n\nwhile True:\n intersect_point = [0, 0]\n ret, frame = cap.read()\n\n processed_frame = frame\n\n try:\n processed_frame, intersect_point = pipeline(frame)\n cv.imwrite('lanedetection.png', processed_frame)\n #cv.imshow('lane detection', processed_frame)\n\n except Exception:\n print(\"Fail\")\n\n final_line_out.write(processed_frame)\n speed = 17500\n\n width = processed_frame.shape[1]\n\n steering.setSpeed(14000)\n\n veerCorrection = 200\n lTurnValue = 1400\n rTurnValue = 200\n sleep = 0\n if intersect_point[0] != 0:\n print(abs(intersect_point[0] - width / 2))\n if abs(intersect_point[0] - width / 2) < 20:\n print('straight')\n steering.steer(0)\n elif intersect_point[0] < (processed_frame.shape[1] / 2):\n print('turn left')\n\n GPIO.output(18, GPIO.HIGH)\n GPIO.output(17, GPIO.LOW)\n\n #correctionAmnt = int((abs((width / 2) - intersect_point[0]) / 3)) * -1\n\n #print(correctionAmnt)\n\n steering.steer(-80)\n\n #time.sleep(sleep)\n # steering.setSpeed(0)\n\n\n\n\n elif intersect_point[0] > (processed_frame.shape[1] / 2):\n print('turn right')\n\n GPIO.output(18, GPIO.LOW)\n GPIO.output(17, GPIO.HIGH)\n\n #correctionAmnt = int((abs((width / 2) - 
intersect_point[0]) / 3))\n\n #print(correctionAmnt)\n\n steering.steer(80)\n #time.sleep(sleep)\n # steering.setSpeed(0)\n\n\n else:\n GPIO.output(18, GPIO.LOW)\n GPIO.output(17, GPIO.LOW)\n\n steering.setSpeed(0)\n\n\n if cv.waitKey(1) & 0xFF == ord('q'): # Break if key 'q' pressed\n break\n\ncap.release()\nfinal_line_out.release()\nall_line_out.release()\ncv.destroyAllWindows()\n","sub_path":"archive/lanedetection.py","file_name":"lanedetection.py","file_ext":"py","file_size_in_byte":9586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"396407794","text":"\"\"\"\nImplement an algorithm to find the kth to last element of a singly linked list.\n\"\"\"\n\nimport Node\n\ndef subList(ll, k):\n if k > ll.length:\n print(\"error: k > length of linkedList\")\n return Node.LinkedList()\n n = ll.head\n r = ll.length - k\n for i in range(0, r):\n n = n.next\n newLL = Node.LinkedList()\n newLL.head = n\n return newLL\n\nll = Node.LinkedList()\nll.addStr(\"ABCDEFG\")\nll.print()\nnewLL = subList(ll, 3)\nll.print()\nnewLL.print()\n","sub_path":"ch2/2.2.py","file_name":"2.2.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"613641037","text":"from wordfreq import zipf_frequency, tokenize\nfrom googletrans import Translator\n\n\ndef process_sub(text, file_type, score):\n important_words = {}\n if file_type == '.srt':\n text = text.decode('iso-8859-1')\n text = text.split('\\r\\n\\r\\n')\n for i, segment in enumerate(text):\n segment = segment.split('\\r\\n')[2:]\n for line in segment:\n line = tokenize(line, 'en')\n for word in line:\n if score / 1.05 < zipf_frequency(word, 'en') < score and word not in important_words:\n important_words[word] = i\n translator = Translator()\n ready_words = list(important_words.keys())\n result = translator.translate(ready_words, src='en', dest='fa')\n modified_segments = {}\n for res in result:\n seg_num = important_words[res.origin]\n if seg_num in modified_segments:\n modified_segments[seg_num].append([res.origin, res.text])\n else:\n modified_segments[seg_num] = [[res.origin, res.text]]\n new_text = \"\"\n for i, segment in enumerate(text):\n if i in modified_segments:\n to_add = []\n for word, trans in modified_segments[i]:\n segment = segment.replace(word, '{0}'.format(word), 1)\n to_add.append(\"{0}:{1}\".format('{0}'.format(word), trans))\n to_add = \"({0})\".format(\" \".join(to_add))\n new_text += segment\n new_text += \"\\r\\n\"\n new_text += to_add\n new_text += \"\\r\\n\\r\\n\"\n else:\n new_text += segment\n new_text += \"\\r\\n\\r\\n\"\n answer = [[res.origin, res.text] for res in result]\n return answer, new_text\n","sub_path":"src/subtitles/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"499898855","text":"class Dog:\n species = \"Canis familaris\"\n\n def __init__(self, breed, name, age):\n self.breed = breed\n self.name = name\n self.age = age\n\ndef main():\n awoo = Dog(\"Beagle\", \"Toby\", 3)\n print(awoo.breed, awoo.name, awoo.age)\n\n bork = Dog(\"Shiba Inu\", \"Christopher\", 5)\n print(bork.breed, bork.name, bork.age)\n\n heck = Dog(\"Irish Wolfhound\", \"Robert\", 1)\n print(heck.breed, heck.name, heck.age)\n\n print(\"Common attributes: Dog.species = \" + Dog.species)\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"Week12/A/dogs.py","file_name":"dogs.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"11123577","text":"import FWCore.ParameterSet.Config as cms\n\nimport os;\n\ndef setupEventSelection(process):\n tag = 'AnySelection'\n\n # rename output file\n oldname = os.path.splitext(process.out.fileName.value())[0]\n process.out.fileName = cms.untracked.string(oldname+tag+'.root')\n\n process.p = cms.Path(process.patDefaultSequence)\n","sub_path":"Analysis/MakeSkims/python/anyEventSelection_cfi.py","file_name":"anyEventSelection_cfi.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"410831201","text":"\"\"\"\n给定一个经过编码的字符串,返回它解码后的字符串。\n\n编码规则为: k[encoded_string],表示其中方括号内部的 encoded_string 正好重复 k 次。注意 k 保证为正整数。\n\n你可以认为输入字符串总是有效的;输入字符串中没有额外的空格,且输入的方括号总是符合格式要求的。\n\n此外,你可以认为原始数据不包含数字,所有的数字只表示重复的次数 k ,例如不会出现像 3a 或 2[4] 的输入。\n\n示例:\n\ns = \"3[a]2[bc]\", 返回 \"aaabcbc\".\ns = \"3[a2[c]]\", 返回 \"accaccacc\".\ns = \"2[abc]3[cd]ef\", 返回 \"abcabccdcdcdef\".\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/decode-string\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\n\n\nclass Solution:\n def mainFuc(self, s:str):\n return self.decodeString(s)\n\n # 栈\n def decodeString(self, s: str) -> str:\n stack = []\n i = 0\n res = ''\n while i < len(s):\n curstr = ''\n kcount = 0\n if s[i] == '[':\n stack.append('[')\n elif '0' <= s[i] <= '9':\n while '0' <= s[i + kcount] <= '9':\n kcount += 1\n stack.append(int(s[i:i + kcount]))\n elif 'a' <= s[i] <= 'z' or 'A' <= s[i] <= 'Z':\n if len(stack):\n stack.append(s[i])\n else:\n res += s[i]\n else:\n curc = stack.pop()\n while curc != '[':\n curstr = curc + curstr\n curc = stack.pop()\n curc = stack.pop()\n curstr *= curc\n # 若栈不空,将说明仍存在\"[]\",将curstr再次压入栈中\n if not len(stack):\n res += curstr\n else:\n stack.append(curstr)\n i += 1 if kcount == 0 else kcount\n return res\n\nif __name__ == '__main__':\n print(Solution().mainFuc(\"3[a]2[b4[F]c]\"))\n\n\n\n\n\n","sub_path":"python code/题库/394. 字符串解码.py","file_name":"394. 
字符串解码.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"192992920","text":"with open(\"input\", \"r\") as f:\n \n blocks = []\n configurations = []\n steps = 0\n N = 0\n \n for line in f:\n for block in line.split():\n blocks.append(int(block))\n N += 1\n \n while (str(blocks) not in configurations):\n \n configurations.append(str(blocks))\n \n count = max(blocks)\n i = blocks.index(count)\n \n blocks[i] = 0\n while (count > 0):\n i += 1\n if (i >= N):\n i = 0\n blocks[i] += 1\n count -= 1\n \n steps += 1\n\n print (\"Steps: \" + str(steps))\n","sub_path":"6/6a.py","file_name":"6a.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"559738736","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/1/8 11:05\n# @Author : StephenZ\n# @Site : \n# @File : Case402-fail.py\n# @Purpose :\n# @Software : PyCharm\n# @Copyright: (c) StephenZ 2020\n# @Licence : <@2019>\n\n# from typing import List\n\"\"\"\n移掉K位数字\n\"\"\"\n\n\nclass Solution:\n def removeKdigits(self, num: str, k: int) -> str:\n if k == 0:\n return num\n elif len(num) == k:\n return \"0\"\n else:\n compare_num = num[:k+1]\n print(compare_num)\n minNum = min(compare_num)\n print(\"minNum:\", minNum)\n minNumIndex = compare_num.index(minNum)\n last = num[minNumIndex+1:]\n print(\"last:\", last)\n result = minNum + self.removeKdigits(last, k-minNumIndex)\n if len(last) == k:\n print(\"111111:\", last, minNum)\n return minNum + \"\"\n\n result = str(int(result))\n print(\"result:\", result)\n return result\n\n\ndef test_solution():\n s = Solution()\n num1 = \"1432219\"\n k1 = 3\n num2 = \"10200\"\n k2 = 1\n num3 = \"112\"\n k3 = 1\n num4 = \"10\"\n k4 = 1\n num5 = \"1173\"\n k5 = 2\n num6 = \"43214321\"\n k6 = 4\n num7 = \"10\"\n k7 = 2\n\n # assert s.removeKdigits(num1, k1) == \"1219\"\n # assert s.removeKdigits(num2, k2) == \"200\"\n # assert s.removeKdigits(num3, k3) == \"11\"\n # assert s.removeKdigits(num4, k4) == \"0\"\n # assert s.removeKdigits(num5, k5) == \"11\"\n assert s.removeKdigits(num6, k6) == \"1321\"\n assert s.removeKdigits(num7, k7) == \"0\"\n\n\nif __name__ == '__main__':\n test_solution()\n","sub_path":"Case/Case402-fail.py","file_name":"Case402-fail.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"15643693","text":"import io\nimport os\nimport tarfile\nimport hashlib\n\nfrom django.core.files import File\nfrom docker.api.container import ContainerApiMixin\n\n\ndef put_file(container: ContainerApiMixin, src: File, dest: str) -> ():\n \"\"\"\n Puts a file on the host into a container.\n\n This method will create an in memory tar archive, add the src file to this\n and upload it to the docker container where it will be unarchived at dest.\n\n :param container: The container to write to\n :param src: The path to the source file on the host\n :param dest: The path to the target file in the container\n :return:\n \"\"\"\n\n tar_b = io.BytesIO()\n tar = tarfile.open(fileobj=tar_b, mode='w')\n tarinfo = tarfile.TarInfo(name=os.path.basename(dest))\n tarinfo.size = src.size\n\n # type File does not have a __enter__ method, so cannot use `with`\n src.open('rb')\n try:\n tar.addfile(tarinfo, fileobj=src)\n finally:\n src.close()\n tar.close()\n\n tar_b.seek(0)\n container.put_archive(os.path.dirname(dest), tar_b)\n\n\ndef 
get_hash(file: str, hashfn: hashlib.sha256 = None) -> str:\n \"\"\"\n Calculates the hash of a file using the optional hashfn. If no hashfn is\n provided, hashlib.sha256 is used\n\n :param file: The full location of the file\n :param hashfn: The hash function to use. Default: hashlib.sha256\n :return: The hash of the file\n \"\"\"\n\n if hashfn is None:\n hashfn = hashlib.sha256\n\n m = hashfn()\n with open(file, 'rb') as f:\n m.update(f.read())\n\n return m.hexdigest()\n","sub_path":"src/staas/transfers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"419542534","text":"import cv2\nimport matplotlib.pyplot as plt\nimport sys\nimport os\n\nimg_bgr = cv2.imread( sys.argv[1] )\nimg_rgb = cv2.cvtColor( img_bgr, cv2.COLOR_BGR2RGB )\n\nsurf = cv2.xfeatures2d.SURF_create()\n\n# detect keypoints\nkp = surf.detect( img_bgr )\n\nimport numpy as np\nimg_kp = np.zeros_like( img_bgr )\nimg_kp = cv2.drawKeypoints( img_rgb, kp, img_kp, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS )\n\nplt.clf()\nplt.imshow( img_kp )\nFILESAVE = 'TEST_IMG_5.pdf'\nplt.savefig(FILESAVE)\nos.system('pdfcrop ' + FILESAVE + ' ' + FILESAVE)\n\n# feature descriptor can be computed using\nkp, des = surf.compute( img_bgr, kp )\n\nprint('des.shape = ', des.shape)\n\n# alternative: detect and compute\n# kp2, des2 = sift.detectAndCompute( img_bgr, None )\n","sub_path":"cv2/04_test_surf.py","file_name":"04_test_surf.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"608940696","text":"#written by Michael Sharp. Last revision 5/15/2014\nimport csv\nimport copy\nimport os\nimport sys\n\n#complex data structure for holding the row data\nclass for_csv:\n #needs the data and the column names to create it\n def __init__(self,info,column_names):\n self.info = info\n self.column_names = column_names\n def __str__(self):\n string =\"\"\n for x in self.info:\n string+=str(x)\n string+=','\n string = string[:-1]\n return string\n #for adding two csv lines together\n def add(self, other_csv):\n temp = other_csv.getAllData()\n for x in self.column_names:\n count = self.column_names.index(x)\n try:\n self.info[count]=int(int(self.info[count]) +int(temp[count]))\n except:\n self.info[count]+=str(temp[count])\n return self\n #returns the list that holds that data \n def getAllData(self):\n return self.info\n #lets you set the data, pass in the name of the column you want to change and what you want it to be \n def setData(self,column_name,change_to_what):\n count = self.column_names.index(column_name)\n try:\n self.info[count] = int(change_to_what)\n except:\n self.info[count] =str(change_to_what)\n return\n #lets you change the data, pass in the name of the column and how much you want it to change by \n def modData(self,column_name,how_much):\n count = self.column_names.index(column_name)\n try:\n self.info[count] += int(how_much)\n except:\n self.info[count] +=str(how_much)\n return\n #returns just one peice of the data based on the column name you pass in \n def getData(self,column_name):\n return self.info[self.column_names.index(column_name)]\n \n#main program\ndef main(argv):\n #if op temp parameter is not passed, defaults to 10\n if len(sys.argv) == 1:\n OP_TEMP_WINDOW = 10\n else:\n OP_TEMP_WINDOW = int(sys.argv[1])\n #finds current directory\n loc = os.getcwd()\n loc=os.path.join(loc,\"src\",\"csvFiles\")\n 
first = []\n old_root = \"\"\n #itterates through all the files in the directory\n for root, dirs, files in os.walk(loc):\n total = {}\n for file in files:\n old_root = root\n #prints the file it is working on\n print (os.path.join(root,file)) \n #if its not a csv file it skips it\n if \".csv\" not in file:\n continue\n #if total is in the name it skips it, this is to prevent doubling up on the totals file if its not deleted.\n if \"Total\" in file:\n os.remove(os.path.join(root,file))\n continue\n #opens the csv file to work on it\n with open(os.path.join(root,file),'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter = ',')\n expand = {}\n first = reader.next()\n temp_place = 0;\n temp_for_csv = []\n for line in reader:\n expand[int(line[0])] = for_csv(line[1:],first[1:])\n if (int(line[0]) - temp_place) >1:\n #this loop expands the data, and sets to 0 what should be set to zero\n for x in range(temp_place+1,int(line[0])):\n expand[x] = copy.deepcopy(for_csv(temp_for_csv,first[1:]))\n expand[x].setData('Op Tempo',0)\n expand[x].setData(\"Total Transitions\",0)\n expand[x].setData(\"Transition Durations (Actor State [TransitionDuration])*\",\"\")\n expand[x].setData(\"Enabled Transitions (Actor State [NumberOfEnabledTransitions])*\",\"\")\n expand[x].setData(\"Total Enabled Transitions\",0)\n \n temp_for_csv = line[1:]\n temp_place = int(line[0]) \n op_list = []\n for x in sorted(expand):\n #this fixes the total transitions so they dont spike, but so they put one ball in the bucket for each time unit\n if expand[x].getData(\"Total Transitions\") > 0:\n for y in range (x-int(expand[x].getData(\"Total Transitions\")),x):\n expand[y].modData(\"Total Transitions\",1)\n expand[x].setData(\"Total Transitions\",0)\n #fixes op tempo so it is correct\n if int(expand[x].getData(\"Op Tempo\")) > 0:\n op_list.append(x)\n for y in op_list:\n if (x-y) > OP_TEMP_WINDOW:\n op_list.remove(y)\n expand[x].setData(\"Op Tempo\",len(op_list))\n #prints out the file as a new csv in the same location as the original. It overwrites the original\n with open(os.path.join(root,file),'wb') as outcsv:\n out = csv.writer(outcsv) \n out.writerow(first)\n for x in sorted(expand):\n temp = [x]\n temp.extend(expand[x].getAllData())\n out.writerow(temp) \n if x in total:\n total[x]=total[x].add(expand[x])\n else:\n total[x] = copy.deepcopy(expand[x]) \n #prints out the totals file. 
\n if len(total) >0: \n with open(os.path.join(old_root,\"Total.csv\"),'wb') as outcsv:\n out = csv.writer(outcsv) \n out.writerow(first)\n for x in sorted(total):\n temp = [x]\n temp.extend(total[x].getAllData())\n out.writerow(temp)\nif __name__ ==\"__main__\":\n main(sys.argv[1:])","sub_path":"PostProcessing.py","file_name":"PostProcessing.py","file_ext":"py","file_size_in_byte":5981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"70629352","text":"import win32clipboard as wc\nimport win32con\nclass Simulate_Clipboard:\n\n #读取剪切板\n @staticmethod\n def get_clipboard():\n #打开剪切板\n wc.OpenClipboard()\n #获取剪切板数据\n data = wc.GetClipboardData(win32con.CF_TEXT)\n #关闭剪切板\n wc.CloseClipboard()\n return data\n\n #设置剪切板内容\n @staticmethod\n def set_clipboard(content):\n wc.OpenClipboard()\n #清空剪切板\n wc.EmptyClipboard()\n #将数据astring写入剪切板\n wc.SetClipboardData(win32con.CF_UNICODETEXT, content)\n wc.CloseClipboard()","sub_path":"Util/Clipboard_Simulation.py","file_name":"Clipboard_Simulation.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"468354106","text":"import random\nfrom Genotyp import root_genes, stalk_genes, leaves_genes, stalk_creations_genes, leaves_color_genes\n\n\ndef mutate(plant: list) -> list:\n gen_to_mutate = random.randint(0, 6)\n tmp = plant[gen_to_mutate]\n\n if gen_to_mutate == 0:\n index = root_genes.index(plant[0])\n plant[0] = root_genes[(index + 1) % 2]\n\n if gen_to_mutate == 1:\n index = stalk_genes.index(plant[1])\n plant[1] = stalk_genes[(index + 1) % 2]\n\n if gen_to_mutate == 2:\n index = leaves_genes.index(plant[2])\n plant[2] = leaves_genes[(index + 1) % 4]\n\n if gen_to_mutate == 3:\n index = stalk_creations_genes.index(plant[3])\n plant[3] = stalk_creations_genes[(index + 1) % 4]\n\n if gen_to_mutate == 4:\n index = leaves_color_genes.index(plant[4])\n plant[4] = leaves_color_genes[(index + 1) % 4]\n\n if gen_to_mutate in [5, 6]:\n wsp = random.randint(5, 20) / 10\n plant[gen_to_mutate] = round(plant[gen_to_mutate] * wsp, 2)\n\n print(f\"mutacja genu: {gen_to_mutate} z {tmp} na {plant[gen_to_mutate]}\")\n\n return plant\n\n\ndef breeding(parent_1: list, parent_2: list) -> list:\n new_plant_1 = []\n new_plant_2 = []\n\n for i in range(3):\n new_plant_1.append(parent_1[i])\n new_plant_2.append(parent_2[i])\n for j in range(2):\n new_plant_1.append(parent_2[j + 3])\n new_plant_2.append(parent_1[j + 3])\n\n rot1 = random.randint(0, 9)\n # Root Length i=5\n tmp = (rot1 * parent_2[5] + (10 - rot1) * parent_1[5]) / 10\n new_plant_1.append(round(tmp, 2))\n rot2 = random.randint(0, 9)\n tmp = (rot2 * parent_1[5] + (10 - rot2) * parent_2[5]) / 10\n new_plant_2.append(round(tmp, 2))\n\n # Stalk height i=6\n hei1 = random.randint(0, 9)\n tmp = (hei1 * parent_1[6] + (10 - hei1) * parent_2[6]) / 10\n new_plant_1.append(round(tmp, 2))\n hei2 = random.randint(0, 9)\n tmp = (hei2 * parent_2[6] + (10 - hei2) * parent_1[6]) / 10\n new_plant_2.append(round(tmp, 2))\n\n tmp = [new_plant_1, new_plant_2]\n\n mutagen = random.randint(0, 99)\n if mutagen % 10 == 0:\n plant = tmp.pop(random.randint(0, 1))\n tmp.append(mutate(plant))\n\n return tmp\n\n\ndef create_next_generation(parent_table: list) -> list:\n next_generation = list()\n\n for i in range(int(len(parent_table) / 2)):\n p1 = random.randint(0, len(parent_table) - 1)\n parent1 = parent_table.pop(p1)\n p2 = random.randint(0, len(parent_table) - 1)\n parent2 = 
parent_table.pop(p2)\n for plant in breeding(parent1, parent2):\n next_generation.append(plant)\n\n return next_generation\n","sub_path":"scr/reproducing.py","file_name":"reproducing.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"570341419","text":"# SPDX-License-Identifier: MIT\n# Copyright (c) 2019 Intel Corporation\nimport os\nimport sys\nimport ast\nimport copy\nimport json\nimport asyncio\nimport inspect\nimport logging\nimport argparse\nfrom typing import Optional\n\nfrom .log import LOGGER\nfrom ...repo import Repo\nfrom ...port import Port\nfrom ...feature import Feature, Features\nfrom ...source import Source, Sources, JSONSource\n\nLOGGER = LOGGER.getChild('cli')\n\nclass ParseLoggingAction(argparse.Action):\n\n def __call__(self, parser, namespace, value, option_string=None):\n setattr(namespace, self.dest,\n getattr(logging, value.upper(), logging.INFO))\n logging.basicConfig(level=getattr(namespace, self.dest))\n\nclass Arg(dict):\n\n def __init__(self, name: str, **kwargs) -> None:\n super().__init__(**kwargs)\n self.name = name\n\n def modify(self, name: Optional[str] = None, **kwargs):\n updated = copy.copy(self)\n updated.update(kwargs)\n if not name is None:\n updated.name = name\n return updated\n\nclass JSONEncoder(json.JSONEncoder):\n '''\n Encodes dffml types to JSON representation.\n '''\n\n def default(self, obj):\n if isinstance(obj, Repo):\n return obj.dict()\n elif isinstance(obj, Feature):\n return obj.NAME\n return json.JSONEncoder.default(self, obj)\n\nclass CMD(object):\n\n JSONEncoder = JSONEncoder\n\n arg_log = Arg('-log', help='Logging level', action=ParseLoggingAction,\n required=False, default=logging.INFO)\n\n def __init__(self, **kwargs) -> None:\n self.logger = logging.getLogger('%s.%s' % (self.__class__.__module__,\n self.__class__.__qualname__))\n for name, method in [(name.lower().replace('arg_', ''), method) \\\n for name, method in inspect.getmembers(self) \\\n if isinstance(method, Arg)]:\n if not name in kwargs and method.name in kwargs:\n name = method.name\n if not name in kwargs and 'default' in method:\n kwargs[name] = method['default']\n if name in kwargs:\n self.logger.debug('Setting %s = %r', name, kwargs[name])\n setattr(self, name, kwargs[name])\n else:\n self.logger.debug('Ignored %s', name)\n\n async def __aenter__(self):\n pass\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n pass\n\n @classmethod\n async def parse_args(cls, *args):\n parser = Parser()\n parser.add_subs(cls)\n return parser, parser.parse_args(args)\n\n @classmethod\n async def cli(cls, *args):\n self = cls()\n parser, args = await self.parse_args(*args)\n if getattr(args, 'cmd', None) is None:\n parser.print_help()\n return None\n if getattr(args.cmd, 'run', None) is None:\n args.parser.print_help()\n return None\n cmd = args.cmd(**self.sanitize_args(vars(args)))\n async with cmd:\n if inspect.isasyncgenfunction(cmd.run):\n return [res async for res in cmd.run()]\n else:\n return await cmd.run()\n\n def sanitize_args(self, args):\n '''\n Remove CMD internals from arguments passed to subclasses of CMD.\n '''\n for rm in ['cmd', 'parser', 'log']:\n if rm in args:\n del args[rm]\n return args\n\n @classmethod\n def main(cls, loop=asyncio.get_event_loop(), argv=sys.argv):\n '''\n Runs cli commands in asyncio loop and outputs in appropriate format\n '''\n result = None\n try:\n result = loop.run_until_complete(cls.cli(*argv[1:]))\n except 
KeyboardInterrupt: # pragma: no cover\n            pass # pragma: no cover\n        loop.run_until_complete(loop.shutdown_asyncgens())\n        loop.close()\n        if not result is None:\n            json.dump(result, sys.stdout, sort_keys=True, indent=4,\n                separators=(',', ': '), cls=cls.JSONEncoder)\n            print()\n\nclass Parser(argparse.ArgumentParser):\n\n    def add_subs(self, add_from: CMD):\n        '''\n        Add sub commands and arguments recursively\n        '''\n        # Only one subparser should be created even if multiple sub commands\n        subparsers = None\n        for name, method in [(name.lower().replace('_', ''), method) \\\n                for name, method in inspect.getmembers(add_from)]:\n            if inspect.isclass(method) and issubclass(method, CMD):\n                if subparsers is None: # pragma: no cover\n                    subparsers = self.add_subparsers() # pragma: no cover\n                parser = subparsers.add_parser(name, help=None \\\n                        if method.__doc__ is None else method.__doc__.strip())\n                parser.set_defaults(cmd=method)\n                parser.set_defaults(parser=parser)\n                parser.add_subs(method) # type: ignore\n            elif isinstance(method, Arg):\n                self.add_argument(method.name, **method)\n","sub_path":"dffml/util/cli/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"598994426","text":"ifile = open(\"input.txt\", \"r\")\nilist = []\nlinecount = 0\n#for loop for each line in opened file 'ifile'; line = the current line\nfor line in ifile:\n    ilist.append([])\n    ilist[linecount] = line\n    #strip the trailing newline from the current line\n    ilist[linecount] = ilist[linecount].rstrip('\\n')\n    linecount+=1\nifile.close()\n\nniceWords = 0\n#must contain a pair of any two letters that appears at least twice without overlapping\n#must contain at least one letter which repeats with exactly one letter between them\nfor i in range(len(ilist)):\n    cString = ilist[i]\n    repChk = 0\n    pairChk = 0\n    pairCt = 0\n    pLetter = \"\"\n    p2Letter = \"\"\n    p3Letter = \"\"\n    cLetter = \"\"\n    noChk = 0\n    for j in range(len(cString)):\n        cLetter = cString[j]\n        if j-1 > -1:\n            pLetter = cString[j-1]\n        if j-2 > -1:\n            p2Letter = cString[j-2]\n        if j-3 > -1:\n            p3Letter = cString[j-3]\n        #checks for repeated characters separated by 1 character\n        if repChk != 1 and p2Letter != \"\" and p2Letter == cLetter:\n            repChk = 1\n        #checks for repeated pairs of characters (str.count counts non-overlapping occurrences)\n        if pairChk != 1 and pLetter != \"\" and cString.count(pLetter + cLetter) > 1:\n            pairChk = 1\n\n        if repChk == 1 and pairChk == 1:\n            niceWords += 1\n            print(cString)\n            break\nprint(niceWords)\n","sub_path":"Day 5/Day5Part2.py","file_name":"Day5Part2.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"281912857","text":"import random, math\nx = [[0,0],[1,0],[0,1],[1,1]]\ny = [0,1,1,0]\naccuracy = 5\nclass Node:\n    inputs = []\n    outputs = []\n    nodes = []\n    def inp(count):\n        for x in range (0,count):\n            Node.inputs.append(Node(-1))\n    def output(count):\n        for x in range (0,count):\n            Node.outputs.append(Node(len(Node.nodes) + 1))\n    def create(length,width):\n        for x in range(0,width):\n            tmp = []\n            for y in range(0,length):\n                \n                tmp.append(Node(x))\n            Node.nodes.append(tmp)\n    def connect(self):\n        print(str(self.pos))\n        print(Node.nodes[self.pos])\n        if self.pos < 1:\n            \n            for x in range(0, len(Node.inputs)):\n                tmp = connection(Node.inputs[x], self)\n                self.connected.append(tmp)\n                Node.inputs[x].connections = tmp\n        else:\n            \n            for x in range(0, len(Node.nodes[self.pos])):\n                tmp = connection(Node.nodes[self.pos-1][x], self)\n                self.connected.append(tmp)\n                
Node.nodes[self.pos-1][x].connections = tmp\n        \n\n    def __init__(self, position):\n        Node.nodes.append(self)\n        self.connections = []\n        self.connected = []\n        self.pos = position\n        self.connect()\n    #Math functions\n    def sigmoid(self):\n        return (1/(1+math.exp(-sum(self.connected))))\n    def average(self):\n        return (sum(self.connected)/len(self.connected))\nclass connection():\n    def __init__(self,inp,out):\n        self.fm = inp\n        self.to = out\n        self.weight = random.random()\n    def change(self, direction):\n        #nudge the weight up in progressively smaller steps, clamping at 1\n        if direction > 0:\n            for x in range(1, accuracy + 1):\n                tmp = self.weight + (1 / math.pow(10,x))\n                if tmp < 1:\n                    break\n                if x == accuracy:\n                    tmp = 1\n            self.weight = tmp\n        #nudge the weight down in progressively smaller steps, clamping at 0\n        if direction < 0:\n            for x in range(1, accuracy + 1):\n                tmp = self.weight - (1 / math.pow(10,x))\n                if tmp > 0:\n                    break\n                if x == accuracy:\n                    tmp = 0\n            self.weight = tmp\n    def set(self, post):\n        self.weight = post\n    \ndef main():\n    Node.inp(3)\n    Node.create(5,5)\nmain()\n    \n    \n","sub_path":"nureal network.py","file_name":"nureal network.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"634385595","text":"import sys\nimport re\nimport os\n\npart = sys.argv[1]\n\nif not os.path.exists(\"../data/vaccine_distribution_text\"):\n    os.makedirs(\"../data/vaccine_distribution_text\")\n\nif not os.path.exists(\"../data/vaccine_text_wo_distribution\"):\n    os.makedirs(\"../data/vaccine_text_wo_distribution\")\n\ndistribution_regex1 = re.compile(r'\\b(distribution|distributed|distributing|distribute|distributes|deliver|delivered|delivery|batch|shipments|prioritize|prioritizing|prioritizes|1a|1b|1c|eligibility|deployment)\\b')\ndistribution_regex2 = re.compile(r'\\breceiv+?(ed\\b|es\\b|ing\\b|e\\b).*[0-9].*\\bdose+?(s\\b|\\b)')\nf = open('../data/vaccine_text/vaccine_text_part%s.csv'%(part),'r')\ndist_out = open('../data/vaccine_distribution_text/vaccine_distribution_text_part%s.csv' % (part), 'w+')\nwo_dist_out = open('../data/vaccine_text_wo_distribution/vaccine_text_wo_distribution_part%s.csv'%(part), 'w+')\ns = '|$|' \nfor lines in f.readlines():\n    try:\n        lst = lines.split('|$|')\n        read_text = lst[0]\n        clean_text = lst[1]\n        read_user_id = lst[2]\n        read_tweet_id = lst[3]\n        out_str = s.join([read_text, clean_text, read_user_id, read_tweet_id])\n        if distribution_regex1.search(clean_text):\n            dist_out.writelines(out_str)\n        elif distribution_regex2.search(clean_text):\n            dist_out.writelines(out_str)\n        else:\n            wo_dist_out.writelines(out_str)\n    except:\n        pass\ndist_out.close()\nwo_dist_out.close()\nf.close()\n","sub_path":"code/select_distribution.py","file_name":"select_distribution.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"359833689","text":"\"\"\"\r\nCreated on Sat May  9 22:59:57 2020\r\n\r\n@author: Madhan Kumar Selvaraj\r\n\"\"\"\r\n\r\nfrom flask import Flask, render_template, request\r\nfrom chatbot_run import chatbot_response\r\nfrom scrape import scrape_data\r\nimport speech_recognition as sr\r\nfrom gtts import gTTS \r\nimport os \r\nimport pyttsx3\r\nimport engineio\r\n\r\ncheck_wikipedia1 = ['what', 'is']\r\ncheck_wikipedia2 = ['who', 'is']\r\ncheck_wikihow = ['how', 'to']\r\n\r\n\r\n\r\ndef speak(text):\r\n    language = 'en'\r\n    output = gTTS(text=text, lang= language, slow= False)\r\n    output.save(\"output.mp3\")\r\n    os.system(\"start output.mp3\")\r\n    \r\ndef speechtotext(text):\r\n    x=pyttsx3.init()\r\n    print(text)\r\n    x.setProperty('rate',120)\r\n    x.setProperty('volume',100)\r\n    x.say(text)\r\n    
x.runAndWait()\r\n\r\n\r\napp = Flask(__name__)\r\n@app.route(\"/home\")\r\ndef home():\r\n    return render_template(\"index.html\")\r\n\r\n@app.route(\"/\")\r\ndef login():\r\n    return render_template(\"login.html\")\r\n\r\n@app.route(\"/speech\")\r\ndef speech_recognition():\r\n    r = sr.Recognizer()\r\n    r.pause_threshold = 0.5\r\n    with sr.Microphone() as source:\r\n        r.adjust_for_ambient_noise(source)\r\n        audio = r.listen(source)\r\n    try:\r\n        return r.recognize_google(audio) \r\n    except sr.UnknownValueError:\r\n        error = \"error\"\r\n        return error\r\n\r\n@app.route(\"/get\")\r\ndef get_bot_response():\r\n    user_request = request.args.get('msg') # Fetching input from the user\r\n    user_request = user_request.lower()\r\n    if len(user_request.split(\" \")) > 1:\r\n        check_search = user_request.split(\" \")[0]\r\n        if check_search == 'google':\r\n            user_request = user_request.replace(\"google\",\"\")\r\n            user_request = user_request.translate ({ord(c): \"\" for c in \"!@#$%^&*()[]{};:,./<>?\\|`~-=_+\"})\r\n            check_query = user_request.split(\" \")[1]\r\n            check_text = user_request.split(\" \")[1:3]\r\n            if check_text == check_wikipedia1 or check_text == check_wikipedia2:\r\n                response = scrape_data(user_request, \"wikipedia\")\r\n            elif check_text == check_wikihow:\r\n                response = scrape_data(user_request, \"wikihow\")\r\n            elif check_query == \"nearby\":\r\n                response = scrape_data(user_request, \"nearby\")\r\n            else:\r\n                response = scrape_data(user_request, \"\")\r\n        else:\r\n            if user_request == 'how are you?' or user_request == 'how are you':\r\n                response= 'Fine , Good to see you again'\r\n            elif user_request == 'what is your name' or user_request == 'Who are you':\r\n                response = ' I am farmbot, I help in agriculture related problems'\r\n            else:\r\n                user_request = user_request.translate ({ord(c): \"\" for c in \"!@#$%^&*()[]{};:,./<>?\\|`~-=_+\"})\r\n                check_query = user_request.split(\" \")[1]\r\n                check_text = user_request.split(\" \")[1:3]\r\n                if check_text == check_wikipedia1 or check_text == check_wikipedia2:\r\n                    response = scrape_data(user_request, \"wikipedia\")\r\n                elif check_text == check_wikihow:\r\n                    response = scrape_data(user_request, \"wikihow\")\r\n                elif check_query == \"nearby\":\r\n                    response = scrape_data(user_request, \"nearby\")\r\n                else:\r\n                    response = scrape_data(user_request, \"\") \r\n\r\n    else:\r\n        if user_request == 'hi':\r\n            response = 'Hello , Thanks for your greeting'\r\n        elif user_request == 'bye' or user_request == 'thankyou':\r\n            response = 'bye, Thank you'\r\n        elif user_request == 'hello':\r\n            response = 'Good to see you again'\r\n        else:\r\n            response = 'hey'\r\n    print(response)\r\n    speechtotext(response)\r\n    return response\r\n\r\nif __name__ == \"__main__\":\r\n    app.run(threaded=False)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"238437079","text":"from queue import Queue\n# maxsize defaults to 0, i.e. unbounded\n# once > 0, q.put() will also block when the number of messages reaches the limit\nq = Queue(maxsize=0)\n\n# block the program, waiting for a queue message.\nq.get()\n\n# get a message, with a timeout\nq.get(timeout=5.0)\n\n# send a message\nq.put()\n\n# wait until all messages have been consumed\nq.join()\n\n# the three methods below are good to know, but do not use them in code\n\n# query the current number of messages in the queue\nq.qsize()\n\n# whether all queue messages have been consumed, True/False\nq.empty()\n\n# check whether the queue is full\nq.full()","sub_path":"case/各种小资源/线程通信-queue.py","file_name":"线程通信-queue.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"83697106","text":"import os\nfrom 
sklearn2sql_heroku.tests.classification import generic as class_gen\n\ndef createDirIfNeeded(dirname):\n    try:\n        os.makedirs(dirname);\n    except:\n        pass\n\n\n\ndef create_script(model, ds, dialect):\n    print(\"GENERATING_MODEL\" , model, ds, dialect);\n    dirname = \"tests/classification/\" + str(ds) ;\n    print(dirname);\n    createDirIfNeeded(dirname);\n    filename = dirname + \"/ws_\" + ds + \"_\" + model + \"_\" + dialect + \"_code_gen.py\";\n    file = open(filename, \"w\");\n    print(\"WRITING_FILE\" , filename);\n    file.write(\"from sklearn2sql_heroku.tests.classification import generic as class_gen\\n\");\n    file.write(\"\\n\\n\");\n    args = \"\\\"\" + model + \"\\\" , \\\"\" + ds + \"\\\" , \\\"\" + dialect + \"\\\"\";\n    file.write(\"class_gen.test_model(\" + args + \")\\n\");\n    file.close();\n\nmodels = class_gen.get_models()\ndatasets = class_gen.get_datasets()\n\ndialects = class_gen.get_known_dialects()\n\n\nfor model in models.keys():\n    for ds in datasets.keys():\n        for dialect in dialects:\n            create_script(model , ds, dialect)\n\n","sub_path":"tests/classification/gen_all.py","file_name":"gen_all.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"378555816","text":"import json\r\n\r\ndef clean_sentence(s):\r\n    def remove_char(s, c):\r\n        while c in s:\r\n            s = s.replace(c, '')\r\n        return s\r\n\r\n    s = remove_char(s, '(')\r\n    s = remove_char(s, ')')\r\n\r\n    # remove 'A' to 'Z'\r\n    for i in range(65, 91):\r\n        s = remove_char(s, chr(i))\r\n\r\n    s = s.split(' ')\r\n    while '' in s:\r\n        s.remove('')\r\n    while ' ' in s:\r\n        s.remove(' ')\r\n\r\n    s = ' '.join(s)\r\n\r\n    return s\r\n\r\n\r\ndef find_type(s, target):\r\n    result = []\r\n\r\n    def find_type_once(s, target):\r\n        oriStr = s\r\n        idx = s.index(target)\r\n        s = s[idx + len(target):]\r\n\r\n        bracketCnt = 1\r\n        resultStr = ''\r\n        while bracketCnt != 0:\r\n            c = s[0]\r\n            resultStr += c\r\n            s = s[1:]\r\n\r\n            if c == '(':\r\n                bracketCnt += 1\r\n            elif c == ')':\r\n                bracketCnt -= 1\r\n\r\n        resultStr = resultStr[:-1]\r\n        nextStr = oriStr[:idx] + target.lower() + oriStr[idx+len(target):]\r\n\r\n        return resultStr.strip(), nextStr\r\n\r\n    while target in s:\r\n        resultStr, s = find_type_once(s,target)\r\n        result.append(resultStr)\r\n\r\n    return result\r\n\r\ndef read_json(filename):\r\n    with open(filename, \"r\") as jfile:\r\n        content = json.load(jfile)\r\n    return content\r\n\r\ndef nn_dict(inputString):\r\n    addList = []\r\n    npResult = find_type(inputString, 'NN ')\r\n    for npItem in npResult:\r\n        addList.append(npItem)\r\n    npResult = find_type(inputString,'NNS ')\r\n    for npItem in npResult:\r\n        addList.append(npItem)\r\n    return addList\r\n\r\ndef get_all_nouns():\r\n    jsonResult = read_json('new_out_parser.json')[0]\r\n    allDict = {}\r\n    allList = []\r\n    for key in jsonResult:\r\n        value = jsonResult[key]\r\n        for instrucString in value:\r\n            tmpList = nn_dict(instrucString)\r\n            for nnItem in tmpList:\r\n                try:\r\n                    allDict[nnItem] += 1\r\n                except KeyError:\r\n                    allDict[nnItem] = 1\r\n    for key in allDict:\r\n        if allDict[key]>2:\r\n            allList.append(key)\r\n    return allList\r\n\r\ndef list2file(addr, list):\r\n    with open(addr, \"w\") as file:\r\n        for item in list:\r\n            file.write(\"%s\\n\"%item)\r\n\r\ndef write_dict_file():\r\n    allList = get_all_nouns()\r\n    list2file(\"dict.txt\",allList)\r\n\r\ndef file2list(addr):\r\n    with open(addr,\"r\") as file:\r\n        content = file.readlines()\r\n    content = [x.strip() for x in content]\r\n    return content\r\n\r\ndef get_nouns_list():\r\n    
objList = []\r\n    regionList = []\r\n    locList = []\r\n    allList = file2list(\"dict.txt\")\r\n    for line in allList:\r\n        if line[0]=='!':\r\n            regionList.append(line[1:])\r\n        elif line[0]=='@':\r\n            locList.append(line[1:])\r\n        else:\r\n            objList.append(line)\r\n    return objList,regionList,locList\r\n\r\n\r\ndef join_nn_str(inputString):\r\n    #join '(DT the) (NN right)' as (NN the_right)\r\n    joinStr = ''\r\n    for wordItem in inputString:\r\n        if wordItem.islower() or wordItem == '(':\r\n            joinStr += wordItem\r\n    # '(the(right'\r\n    newStr = '_'.join(joinStr[1:].split('('))\r\n    return newStr + ')'\r\n    # return '(NN '+newStr + ')'\r\n\r\ndef wcf_clean_sentence():\r\n    jsonResult = read_json('new_out_parser.json')[0]\r\n    objList,regionList,locList = get_nouns_list()\r\n    newDict = {}\r\n    for key in jsonResult:\r\n        value = jsonResult[key] # value is a list of instructions\r\n        for instrucString in value: #every VP\r\n            newInstrucStr = instrucString\r\n            npList = find_type(instrucString,'NP ')\r\n            for npItem in npList:\r\n                if \"NP\" not in npItem:\r\n                    newNpItem = join_nn_str(npItem)\r\n                    replaceFlag = True\r\n                    nnItem1 = find_type(npItem,'NN ')\r\n                    nnItem2 = find_type(npItem,'NNS ')\r\n                    nnItem = nnItem1 + nnItem2\r\n                    if len(nnItem) == 0:\r\n                        replaceFlag=False\r\n                    elif nnItem[-1] in objList:\r\n                        newNpItem = '(NN OBJ_' + newNpItem\r\n                    elif nnItem[-1] in regionList:\r\n                        newNpItem = '(NN REGION_' + newNpItem\r\n                    elif nnItem[-1] in locList:\r\n                        newNpItem = '(NN LOCATION_' + newNpItem\r\n                    else:\r\n                        replaceFlag = False\r\n                    if replaceFlag == True:\r\n                        newInstrucStr = newInstrucStr.replace(npItem,newNpItem)\r\n                        #replace the npItem corresponding to the nnItem\r\n            try:\r\n                newDict[key].append(newInstrucStr)\r\n            except KeyError:\r\n                newDict[key] = []\r\n                newDict[key].append(newInstrucStr)\r\n    newList = []\r\n    newList.append(newDict)\r\n    with open(\"new_new_out_parser.json\",\"w\") as pfile:\r\n        json.dump(newList,pfile, indent = 4)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    #write_dict_file()\r\n    #print(join_nn_str(\"(DT the) (NN right)\"))\r\n    wcf_clean_sentence()\r\n    \r\n#s = '(ROOT(S(VP (VB go) (PP (TO to)(NP(NP (DT the) (NN right))(PP (IN of)(NP (DT the) (NN sofa))))))))'\r\n#result = find_type(s,'NN')\r\n#print (result)\r\n#['right', 'sofa']\r\n#['(NP (DT the) (NN right))(PP (IN of)(NP (DT the) (NN sofa)))', '(DT the) (NN right)', '(DT the) (NN sofa)']\r\n#(NP(NN()))","sub_path":"approaches/stringmatch/clean_sentence.py","file_name":"clean_sentence.py","file_ext":"py","file_size_in_byte":5415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"90631423","text":"#!/bin/env python\n# coding:utf-8\n\n\nclass Secretary:\n    def __init__(self):\n        self.appointment = {}\n\n    def request_appointment(self, when, who):\n        if(when in self.appointment):\n            return False\n        else:\n            self.appointment[when] = who\n            return True\n\n    def cancel_appointment(self, when, who):\n        if when in self.appointment:\n            if self.appointment[when] == who:\n                del self.appointment[when]\n                return True\n        return False\n\n    def update_appointment(self, when1, when2, who):\n        if self.cancel_appointment(when1, who):\n            self.request_appointment(when2, who)\n            return True\n        return False\n\n    def get_schedule(self):\n        return str(self.appointment)\n\n\nclass Manager:\n    def __init__(self):\n        self.sara = Secretary()\n\n    def check_schedule(self):\n        schedule = self.sara.get_schedule()\n        print(schedule)\n\n    def get_secretary(self):\n        return self.sara\n\n\nclass Client:\n    def __init__(self, name):\n        self.name = name\n        
self.contact_point = None\n\n    def set_contact_point(self, contact_point):\n        self.contact_point = contact_point\n\n    def make_appointment(self, when):\n        if(self.contact_point):\n            is_success = self.contact_point.request_appointment(when, self.name)\n            print(self.name + \" could book? : \" + str(is_success))\n\n    def cancel_appointment(self, when):\n        if(self.contact_point):\n            is_success = self.contact_point.cancel_appointment(when, self.name)\n            print(self.name + \" could cancel? : \" + str(is_success))\n\n    def update_appointment(self, when1, when2):\n        if(self.contact_point):\n            is_success = self.contact_point.update_appointment(when1, when2, self.name)\n            print(self.name + \" could update? : \" + str(is_success))\n\nbob = Manager()\n\nadam = Client('adam')\nadam.set_contact_point(bob.get_secretary())\nadam.make_appointment('10:30')\n\ncharles = Client('charles')\ncharles.set_contact_point(bob.get_secretary())\ncharles.make_appointment('11:30')\n\ndag = Client('dag')\ndag.set_contact_point(bob.get_secretary())\ndag.make_appointment('10:30')\n\nadam.cancel_appointment('09:30') # no good: nothing is booked at 9:30\nadam.cancel_appointment('10:30') # can cancel\n\nadam.cancel_appointment('11:30') # no good: trying to cancel charles's booking\n\ndag.make_appointment('10:30') # ok\n\ncharles.update_appointment('10:00','12:30') # 10:00 is not booked\ncharles.update_appointment('11:30','12:30') # ok\n\n\nbob.check_schedule()","sub_path":"py/22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"627190659","text":"import os;\r\nimport time;\r\nfrom os.path import *\r\n\r\ndef pull():\r\n    os.system('hg pull')\r\n    os.system('hg up')\r\n\r\n    \r\ndef createBuildFile():\r\n    if isfile('buildNumber') == False:\r\n        f = open('buildNumber', 'w')\r\n        f.write('0')\r\n        f.close()\r\n    \r\ndef tag():\r\n    createBuildFile()    \r\n    f = open('buildNumber', 'r')\r\n    num = f.read()\r\n    f.close()\r\n    num = int(num)\r\n    num = num + 1\r\n    num = str(num)\r\n    os.system('hg tag -l ' + num)\r\n    f = open('buildNumber', 'w')\r\n    f.write('' + num)\r\n    f.close()","sub_path":"build/hg.py","file_name":"hg.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"417668016","text":"#########################################################################################################\n# 1. Given a list of strings my_list. Create a new list and put into it\n# the elements of my_list by the following rule:\n# If a string is at an odd position in my_list, replace it with\n# the reversed string: \"qwe\" becomes \"ewq\".\n# If it is at an even position - leave it unchanged.\n# Do the task using enumerate.\n\nmy_list = ['qwe', 'rty', 'uio', 'asd', 'fgh']\nresult_list = []\nfor index, word in enumerate(my_list):\n    if index % 2 != 0:\n        result_list.append(word[::-1])\n    else:\n        result_list.append(word)\nprint(result_list)\n\n\n#########################################################################################################\n# 2. Given a list of strings my_list. Create a new list and put into it\n# the elements of my_list whose first character is the letter \"a\".\n\nmy_list = ['qwe', 'rty', 'uio', 'asd', 'fgh', 'azx', 'qwa']\nresult_list = []\nfor word in my_list:\n    if word[0] == 'a':\n        result_list.append(word)\nprint(result_list)\n\n\n#########################################################################################################\n# 3. Given a list of strings my_list. 
Create a new list and put into it\n# the elements of my_list that contain the letter \"a\" anywhere.\n\nmy_list = ['qwe', 'rty', 'uio', 'asd', 'fgh', 'azx', 'qwa']\nresult_list = []\nfor word in my_list:\n    if 'a' in word:\n        result_list.append(word)\nprint(result_list)\n\n\n#########################################################################################################\n# 4. Given a list my_list that may contain both strings (type str) and integers (type int).\n# Create a new list and put into it only the strings from my_list.\n\nmy_list = ['qwe', 1, 'rty', 2, 'uio', 3, 'asd', 4, 'fgh', 5, 'azx', 6, 'qwa']\nresult_list = []\nfor element in my_list:\n    if isinstance(element, str):\n        result_list.append(element)\nprint(result_list)\n\n\n#########################################################################################################\n# 5. Given a string my_str. Create a list and put into it those characters of my_str\n# that occur in the string only once.\n\nmy_str = 'adsfadfadfaq'\nresult_list = []\nfor index in my_str:\n    if my_str.count(index) == 1:\n        result_list.append(index)\nprint(result_list)\n\n\n#########################################################################################################\n# 6. Given two strings. Create a list and put into it those characters\n# that occur in both strings at least once.\n\nmy_str_1 = 'qfgshghg'\nmy_str_2 = 'qwgerty'\nmy_set = set(my_str_1)\nresult_list = list(my_set.intersection(my_str_2))\nprint(result_list)\n\n\n#########################################################################################################\n# 7. Given two strings. Create a list and put into it those characters that occur in both strings,\n# but only once in each.\n\nmy_str_1 = 'qweax'\nmy_str_2 = 'asdqax'\nmy_list_1 = []\nmy_list_2 = []\nfor index in my_str_1:\n    if my_str_1.count(index) == 1:\n        my_list_1.append(index)\nfor index in my_str_2:\n    if my_str_2.count(index) == 1:\n        my_list_2.append(index)\nset_result_list_1 = set(my_list_1)\nresult_list = list(set_result_list_1.intersection(my_list_2))\nprint(result_list)\n\n\n#########################################################################################################\n# 8. Using a dictionary, describe the following structure for a specific person (you can make one up):\n# Second name\n# First name\n# Age\n# Residence\n# Country\n# City\n# Street\n# Job\n# Employment\n# Position\n\nperson = {\n    'Second name': \"da Vinci\",\n    'First name': 'Leonardo',\n    'Age': 67,\n    'Location': {\n        'Country': 'Republic of Florence',\n        'City': 'Vinci',\n        'Street': 'Leonardo street'\n    },\n    'Job': {\n        'Employment': '+',\n        'Position': 'Polymath'\n    }}\nprint(person)\n\n\n#########################################################################################################\n# 9. 
Using a dictionary, describe the following structure (a recipe for a made-up cake;\n# invent and specify the grams for the products):\n# Components\n# Cake layers\n# Flour\n# Milk\n# Butter\n# Egg\n# Cream\n# Sugar\n# Butter\n# Vanilla\n# Sour cream\n# Glaze\n# Cocoa\n# Sugar\n# Butter\n\nComponents = {\n    'Cake': {\n        'Flour': '300 grams',\n        'Milk': '250 milliliters',\n        'Butter': '50 grams',\n        'Eggs': 'two'\n    },\n    'Cream': {\n        'Sugar': '50 grams',\n        'Butter': '100 grams',\n        'Vanilla': '10 grams',\n        'Sour cream': '20 grams'\n    },\n    'Glaze': {\n        'Cocoa': '30 grams',\n        'Sugar': '50 grams',\n        'Butter': '10 grams'\n    }}\nprint(Components)","sub_path":"hw6.py","file_name":"hw6.py","file_ext":"py","file_size_in_byte":5937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"587395083","text":"import cv2\r\nimport os\r\nimport numpy as np\r\nfrom MakeRGB import my_cmap\r\nfrom matplotlib import pyplot as plt\r\nfrom copy import deepcopy\r\nimport sys\r\nimport netCDF4 as nc\r\nfrom bt_tests import save_obj, load_obj\r\nthresh = 2.0\r\ndilatation_size = 11\r\nelement = cv2.getStructuringElement(cv2.MORPH_RECT, (2 * dilatation_size + 1, 2 * dilatation_size + 1),\r\n                                    (dilatation_size, dilatation_size))\r\n\r\nmap_cloud = deepcopy(my_cmap)\r\nmap_cloud.set_over((128 / 255.0, 128 / 255.0, 128 / 255.0))\r\n\r\nfull_out_path = r\"D:\\Users\\Alexander\\ACSPO\\Opencv\\btd_acspo\"\r\n\r\nfiles_to_delete = os.listdir(full_out_path)\r\n\r\nfor file_d in files_to_delete:\r\n    os.remove(os.path.join(full_out_path, file_d))\r\n\r\n\r\ndef to_grayscale(array: np.array, dt_max=2, dt_min=-2):\r\n    median = np.nanmedian(array)\r\n    max_val = np.nanmax(array)\r\n    min_val = np.nanmin(array)\r\n    print(min_val, max_val, median)\r\n    array[np.isnan(array)] = median\r\n    max_val = min(dt_max, max_val)\r\n    min_val = max(dt_min, min_val)\r\n    array[array > max_val] = max_val\r\n    array[array < min_val] = min_val\r\n    result: np.array = (array - min_val) / (max_val - min_val) * 255\r\n    return result.astype(np.uint8)\r\n\r\n\r\npath = r\"C:\\Users\\Alexander Semenov\\Desktop\\ABI_ACSPO_CODE\\cloudmasktest\\tiles_pics\\all_areas\"\r\n\r\nfiles = os.listdir(path)\r\nfiles.sort()\r\nparam = 1.8\r\n\r\nrad = 15\r\ntests_data = load_obj(\"data\")\r\nn_figures = 1\r\nn_col = 6\r\n\r\nlabel_size = 15\r\nfont_size = 15\r\n\r\nindex = 6\r\n\r\nshow_other = False\r\nif not show_other:\r\n    n_col = 3\r\n\r\nfor index in range(len(files)):\r\n    file = files[index]\r\n\r\n    nc_data = nc.Dataset(os.path.join(path, file), \"r\")\r\n\r\n    sst_reynolds = np.array(nc_data.variables[\"sst_reynolds\"])\r\n    sst_regression = np.array(nc_data.variables[\"sst_regression\"])\r\n    individual = np.array(nc_data[\"individual_clear_sky_tests_results\"][:])\r\n    nc_data.close()\r\n    adaptive_mask = individual >> 3 & 1\r\n    gradientx = cv2.Sobel(sst_reynolds, ddepth=cv2.CV_32FC1, dx=1, dy=0)\r\n\r\n    gradienty = cv2.Sobel(sst_reynolds, ddepth=cv2.CV_32FC1, dx=0, dy=1)\r\n\r\n    abs_gradient = gradientx ** 2 + gradienty ** 2\r\n    mask_gradient = abs_gradient > param\r\n    mask_gradient = cv2.GaussianBlur(mask_gradient.astype(np.float32), (rad, rad), 0)\r\n\r\n    sst_reynolds_max = cv2.dilate(sst_reynolds, element)\r\n    sst_reynolds_min = cv2.erode(sst_reynolds, element)\r\n\r\n    corrected_mask = ~(mask_gradient > 0) & (adaptive_mask > 0)\r\n    corrected_mask = ~((sst_reynolds_max - sst_reynolds_min) > thresh) & (adaptive_mask > 0)\r\n\r\n    mask_BTD = tests_data[\"FULL_BTD_MASK\"][index, :, :]\r\n\r\n    delta_sst = tests_data[\"delta_sst\"][index, :, :]\r\n    
total_mask = tests_data[\"Individual\"][index, :, :]\r\n delta_sst[delta_sst > 2.0] = 2.0\r\n delta_sst[delta_sst < -2.0] = -2.0\r\n Validation = tests_data[\"Validation\"][index, :, :]\r\n Original = tests_data[\"Original\"][index, :, :]\r\n nan_mask = np.isnan(tests_data[\"delta_sst\"][index, :, :])\r\n fig = plt.figure(figsize=(20, 10))\r\n fig.add_subplot(n_figures, n_col, 1)\r\n to_show = np.where((mask_BTD > 0) | corrected_mask, 100.0, delta_sst)\r\n to_show[nan_mask] = np.NaN\r\n plt.imshow(to_show, interpolation=\"none\", vmin=-2, vmax=2, cmap=map_cloud)\r\n plt.title(\"BTD_MASK + Static_Adaptive\\ncorrected by gradient\", fontsize=font_size)\r\n cbar3 = plt.colorbar(fraction=0.046, pad=0.04)\r\n cbar3.ax.tick_params(labelsize=label_size)\r\n plt.xticks([])\r\n plt.yticks([])\r\n # plotting the Validation mask\r\n fig.add_subplot(n_figures, n_col, 2)\r\n to_show = np.where(Validation > 0, 100.0, delta_sst)\r\n to_show[nan_mask] = np.NaN\r\n plt.imshow(to_show, interpolation=\"none\", vmin=-2, vmax=2, cmap=map_cloud)\r\n plt.title(\"Validation\", fontsize=font_size)\r\n cbar3 = plt.colorbar(fraction=0.046, pad=0.04)\r\n cbar3.ax.tick_params(labelsize=label_size)\r\n plt.xticks([])\r\n plt.yticks([])\r\n\r\n fig.add_subplot(n_figures, n_col, 3)\r\n to_show = np.where((mask_BTD > 0), 100.0, delta_sst)\r\n to_show[nan_mask] = np.NaN\r\n plt.imshow(to_show, interpolation=\"none\", vmin=-2, vmax=2, cmap=map_cloud)\r\n plt.title(\"BTD_ORIGINAL\", fontsize=font_size)\r\n cbar3 = plt.colorbar(fraction=0.046, pad=0.04)\r\n cbar3.ax.tick_params(labelsize=label_size)\r\n plt.xticks([])\r\n plt.yticks([])\r\n\r\n if show_other:\r\n\r\n\r\n\r\n\r\n fig.add_subplot(n_figures, n_col, 4)\r\n to_show = np.where((Original > 0), 100.0, delta_sst)\r\n to_show[nan_mask] = np.NaN\r\n plt.imshow(to_show, interpolation=\"none\", vmin=-2, vmax=2, cmap=map_cloud)\r\n plt.title(\"ACSPO ORIGINAL\", fontsize=font_size)\r\n cbar3 = plt.colorbar(fraction=0.046, pad=0.04)\r\n cbar3.ax.tick_params(labelsize=label_size)\r\n plt.xticks([])\r\n plt.yticks([])\r\n\r\n fig.add_subplot(n_figures, n_col, 5)\r\n to_show = sst_reynolds\r\n to_show[nan_mask] = np.NaN\r\n plt.imshow(to_show, interpolation=\"none\",cmap=my_cmap)\r\n plt.title(\"sst_reynolds\", fontsize=font_size)\r\n cbar3 = plt.colorbar(fraction=0.046, pad=0.04)\r\n cbar3.ax.tick_params(labelsize=label_size)\r\n plt.xticks([])\r\n plt.yticks([])\r\n\r\n fig.add_subplot(n_figures, n_col, 6)\r\n to_show = sst_regression\r\n to_show[nan_mask] = np.NaN\r\n plt.imshow(to_show, interpolation=\"none\",cmap=my_cmap,vmin=np.nanmin(sst_reynolds)-2.0)\r\n plt.title(\"sst_regression\", fontsize=font_size)\r\n cbar3 = plt.colorbar(fraction=0.046, pad=0.04)\r\n cbar3.ax.tick_params(labelsize=label_size)\r\n plt.xticks([])\r\n plt.yticks([])\r\n\r\n plt.savefig(os.path.join(full_out_path, file + \".jpg\"))\r\n plt.close()\r\n\r\nsys.exit()\r\n","sub_path":"denoise.py","file_name":"denoise.py","file_ext":"py","file_size_in_byte":5676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"456736329","text":"from datetime import datetime, timedelta\nimport csv\nimport os\ntoday = (datetime.utcnow() + timedelta(hours = 10)).date()\n\n\ninput_files = [x for x in os.listdir() if 'CLG_REFRESH' in x and x.endswith('.txt')]\nprint(input_files)\nfor input_file in input_files:\n \n property_id_sum = 0\n row_count = 0\n row_count_final = 0\n \n print('Processing file: ' + input_file)\n ## calculate property ID sum\n with 
open(input_file, newline='', encoding = 'utf-8') as csvfile:\n        reader = csv.reader(csvfile, delimiter='|', quoting=csv.QUOTE_NONE)\n        for row in reader:\n            row_count = row_count + 1\n            if row_count == 1:\n                pid_col = row.index('CL_PROPERTY_ID')\n                print('CL_PROPERTY_ID is in column: {}'.format(pid_col))\n            else:\n                try:\n                    if row[pid_col] != '':\n                        property_id_sum = property_id_sum + int(row[pid_col])\n                except Exception as e:\n                    print(row)\n                    print(row[pid_col])\n                    raise Exception(e)\n    \n    \n    ## output file\n    output_file_name = input_file.replace('.txt', '.DAT')\n    if os.path.isfile(output_file_name):\n        os.remove(output_file_name)\n    \n    ## write final file\n    header_str = 'H|{}|{}|{}'.format(today.strftime('%Y%m%d'), today.strftime('%Y%m%d%H%M%S'), output_file_name)\n    footer_str = 'T|{}|{}|CL_PROPERTY_ID'.format(row_count+1, property_id_sum) ## +1 header, +1 footer, -1 column-name row; matches row_count_final.\n    print(\"header: \", header_str)\n    print(\"footer: \", footer_str)\n    \n    is_first_row = True\n    \n    with open(output_file_name, 'w', newline='\\r\\n', encoding = 'utf-8') as output_fl:\n        output_fl.write(header_str + '\\n')\n        row_count_final = row_count_final + 1\n        with open(input_file, encoding = 'utf-8') as input_fl: \n            for line in input_fl:\n                # skip header row\n                if is_first_row:\n                    is_first_row = False\n                else:\n                    output_fl.write(line) \n                    row_count_final = row_count_final + 1\n\n        output_fl.write(footer_str) # Final row does not need '\\r\\n'\n        row_count_final = row_count_final + 1\n\n    print('Input file \"{}\" row count: {}'.format(input_file, row_count))\n    print('Output file \"{}\" row count: {}'.format(output_file_name, row_count_final))\n    print('\\n')\n\n    ## remove input (otherwise it will get packaged up and delivered)\n    os.remove(input_file)\n    \nprint('done')\n","sub_path":"post-processing_Alex.py","file_name":"post-processing_Alex.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"524737787","text":"import subprocess\n\nfrom . 
import wifi, constants\n\n# Local Constants\nBINPATH = constants.BINPATH / \"linux\"\n\n\nclass _WifiLinux(wifi.Wifi):\n    \"\"\"wifi class that utilizes commands built into Linux\n\n    - iwconfig\n    - ifconfig\n    - iwlist\n    \"\"\"\n\n    @property\n    def status(self)->dict:\n        \"\"\"status of the interface\n        \n        Returns:\n            dict -- interface status as a dictionary\n        \"\"\"\n        ret = super().status\n        ret = {\"name\": self.interface, \"network\": self._network_status,\n               \"interface\": self._interface_status}\n        # If interface is connected to a network\n        if ret[\"network\"] == constants.ONLINE:\n            ret = {**ret, 'ssid': self._ssid, 'frequency': self._frequency}\n        return ret\n\n    def update_status(self):\n        \"\"\"API function that calls private functions to refresh the status\n        \"\"\"\n        if super().update_status():\n            self._status_interface()\n            self._status_network()\n\n    def _status_interface(self):\n        \"\"\"updates interface status\n        \"\"\"\n        self._logger.info(\n            \"checking interface {} status\".format(self.interface))\n        raw = subprocess.run([\"bash\", BINPATH.joinpath(\n            \"status\"), self.interface], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n        err = raw.stderr.decode('utf-8')\n        stdout = raw.stdout.decode('utf-8').replace(\"\\n\", \"\")\n\n        self._logger.debug(\"stdout : {} \\nstderr : {}\".format(stdout, err))\n\n        # Get status\n        status = constants.OFFLINE\n        if err != \"\":\n            self._logger.error(\n                \"interface {} status is unknown\".format(self.interface))\n            self._logger.error(\"traceback : \\n {}\".format(err))\n            status = constants.UNKNOWN\n        elif stdout == \"0x1003\":\n            self._logger.info(\n                \"interface {} is set to 'up'\".format(self.interface))\n            status = constants.ONLINE\n        else:\n            self._logger.warning(\n                \"interface {} is set to 'down'\".format(self.interface))\n\n        self._interface_status = status\n\n    def _status_network(self):\n        \"\"\"updates network status\n        \"\"\"\n        self._logger.info(\n            \"checking interface {} network connections\".format(self.interface))\n        raw = subprocess.run([\"bash\", BINPATH.joinpath(\n            \"network\")], stdout=subprocess.PIPE).stdout.decode('utf-8')\n        self._logger.debug(\"stdout : \\n{}\".format(raw))\n\n        # Decode raw stdout\n        device_list = raw.split(\"\\n\")\n\n        device_dict = {}\n        for pair in device_list:\n            if pair != \"\":\n                key_val = pair.split(\":\")\n                device_dict[key_val[0]] = key_val[1]\n        self._logger.debug(\"interface list {}\".format(device_dict))\n\n        # Get status\n        status = constants.OFFLINE\n        if self.interface not in device_dict:\n            self._logger.error(\n                \"interface {} network status is unknown\".format(self.interface))\n            status = constants.UNKNOWN\n        elif device_dict[self.interface] == \" up\":\n            status = constants.ONLINE\n            self._logger.info(\n                \"interface {} is connected to a network\".format(self.interface))\n\n            # Checks for ssid and frequency if connected\n            self._linux_status_network_helper()\n        else:\n            self._logger.warning(\n                \"interface {} is not connected to a network\".format(self.interface))\n\n        self._network_status = status\n\n    def _linux_status_network_helper(self):\n        \"\"\"helper function to update network status\n        \"\"\"\n        raw = subprocess.run([\"bash\", BINPATH.joinpath(\n            \"iwconfig\"), self.interface], stdout=subprocess.PIPE).stdout.decode('utf-8')\n\n        raw = raw[raw.find(\"ESSID\")+7:]\n        self._ssid = raw[:raw.find('\\\"')]\n        raw = raw[raw.find(\"Frequency\")+10:]\n        self._frequency = raw[:raw.find(\"GHz\")-1]\n        self._logger.info(\"ssid : {}, frequency : {}\".format(\n            self._ssid, self._frequency))\n\n    @property\n    def interface(self)->str:\n        \"\"\"necessary 
for the setter\n        \n        Returns:\n            str -- the name of the interface\n        \"\"\"\n        return super().interface\n\n    @interface.setter\n    def interface(self, interface: str):\n        \"\"\"setter for interface\n        \n        Arguments:\n            interface {str} -- the name of the interface\n        \"\"\"\n        self._interface = interface\n        self._validate_interface()\n\n    def _set_interface(self, status: bool):\n        \"\"\"sets the interface to on or off\n        \n        Arguments:\n            status {bool} -- True:on, False:off\n        \"\"\"\n        # Convert status to str\n        setting = \"down\"\n        if status:\n            setting = \"up\"\n\n        self._logger.debug(\"setting device {} to {}\".format(\n            self.interface, setting))\n        subprocess.run([\"bash\", BINPATH.joinpath(\n            \"set\"), self.interface, setting])\n\n    def _validate_interface(self):\n        \"\"\"Tests interface and sets it to None if it fails\n        \"\"\"\n        # Update status information\n        self.update_status()\n\n        # Determine interface validity through status code\n        if self.status[\"interface\"] == 0:\n            self._logger.info(\n                \"interface {} is already online\".format(self.interface))\n        elif self.status[\"interface\"] == 1:\n            # Automatically try to bring the interface online\n            self._logger.warning(\n                \"interface {} is down. Trying to set to up\".format(self.interface))\n            self._set_interface(True)\n\n            # Update status information again\n            self.update_status()\n\n            # Check if interface is online\n            if self.status[\"interface\"] != 0:\n                self._logger.error(\n                    \"interface {} could not be brought up\".format(self.interface))\n                self._interface = None\n                return\n            self._logger.info(\n                \"interface {} successfully brought up\".format(self.interface))\n        elif self.status[\"interface\"] == 2:\n            self._logger.error(\n                \"interface {} status is unknown\".format(self.interface))\n            self._interface = None\n\n        else:\n            self._logger.error(\"unknown interface status : {}\".format(\n                self.status[\"interface\"]))\n            self._interface = None\n\n    def connect(self, ssid: str, passwd: str, **kwargs)->bool:\n        \"\"\"connects to a WPA network\n        \n        Arguments:\n            ssid {str} -- ssid of network\n            passwd {str} -- password for network\n        \n        Keyword Arguments:\n            country {str} -- country code https://www.iso.org/obp/ui/#search\n            hidden_network {bool} -- True:is hidden, False:is not hidden\n        \n        Returns:\n            bool -- True:connected, False:failed connecting\n        \"\"\"\n        if super().connect(ssid, passwd):\n            self._connect(ssid, passwd, kwargs.get(\n                \"country\", \"US\"), kwargs.get(\"hidden_network\", False))\n            return self.connect_helper()\n\n    def _connect(self, ssid: str, passwd: str, country: str, hidden_network: bool):\n        \"\"\"private function that connect calls.\n\n        DO NOT CALL INDEPENDENTLY\n        \n        Arguments:\n            ssid {str} -- ssid of network\n            passwd {str} -- password for network\n            country {str} -- country code https://www.iso.org/obp/ui/#search\n            hidden_network {bool} -- True:is hidden, False:is not hidden\n        \"\"\"\n        if self.status[\"interface\"] != 0:\n            self._logger.error(\n                \"interface {} is 'down' or unknown\".format(self.interface))\n            return\n\n        if self.status[\"network\"] == 0:\n            self._logger.warning(\n                \"Currently connected to a network {}\".format(self.status[\"ssid\"]))\n\n        wpa_string = self._wpa_passphrase(\n            ssid, passwd, country, hidden_network)\n        self._wpa_supplicant(wpa_string)\n\n    def _wpa_passphrase(self, ssid: str, passwd: str, country: str, hidden_network: bool)->str:\n        \"\"\"creates a wpa_supplicant.conf file as a string\n        \n        Arguments:\n            ssid {str} -- ssid of network\n            passwd {str} -- password for network\n            country {str} -- country code https://www.iso.org/obp/ui/#search\n            
hidden_network {bool} -- True:is hidden, False:is not hidden\n        \n        Returns:\n            str -- generated wpa_supplicant.conf as a string\n        \"\"\"\n        raw = subprocess.run([\"bash\", BINPATH.joinpath(\n            \"wpa_passphrase\"), ssid, passwd], stdout=subprocess.PIPE).stdout.decode('utf-8')\n\n        # Remove string psk\n        clean = raw[:raw.find(\"#\")]+raw[raw.find(\"\\n\", raw.find(\"#\"))+2:]\n\n        # Prepend network information\n        clean = \"country={}\\n\\n\".format(country) + clean\n\n        self._logger.info(\"Country Code : {}\".format(country))\n\n        # Hidden network\n        if hidden_network:\n            self._logger.info(\"hidden network setting requested\")\n            clean = clean[:len(clean)-2]\n            clean += \"\\n   scan_ssid = 1\\n}\"\n\n        return clean\n\n    def _wpa_supplicant(self, config: str):\n        \"\"\"loads the wpa_supplicant.conf string\n        \n        Arguments:\n            config {str} -- the wpa_supplicant.conf file as a string\n        \"\"\"\n        self._logger.debug(\"config : \\n{}\".format(config))\n\n        raw = subprocess.run([\"bash\", BINPATH.joinpath(\n            \"wpa_supplicant\"), self.interface, config])\n\n    def scan_ssid(self)->bool:\n        \"\"\"refreshes ssid_list with visible networks\n        \n        Returns:\n            bool -- T/F = Worked/Failed\n        \"\"\"\n        if super().scan_ssid():\n            if self.interface == \"\":\n                self._logger.error(\n                    \"Can not scan for networks without an interface\")\n                return False\n\n            raw = subprocess.Popen(['sudo', BINPATH.joinpath(\"scan\"), self.interface],\n                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n            # Check for errors\n            err = raw.stderr.read().decode('utf-8')\n            if err != \"\":\n                self._logger.error(\n                    \"traceback: {}\".format(err.replace('\\n', ''))) # Get rid of extra new lines\n                return False\n\n            clean = raw.stdout.read().decode('utf-8')\n\n            # Parse stdout\n            while True:\n                index = clean.find(\"ESSID\")\n                if index == -1:\n                    break\n                clean = clean[index+7:]\n                ssid = clean[:clean.find('\\\"')]\n                if ssid != \"\":\n                    self._logger.debug(\"ssid: {} found\".format(ssid))\n                    self._ssid_list.append(ssid)\n            return self.scan_ssid_helper()\n","sub_path":"WiPi/linux.py","file_name":"linux.py","file_ext":"py","file_size_in_byte":10911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"486918099","text":"import socket\r\nimport time\r\nimport json\r\nfrom threading import Thread\r\nimport numpy as np\r\n\r\n\r\n# TODO: add pressure values and JSON settings\r\n# TODO: add rotation values and JSON settings\r\n\r\n\r\n# Object to hold LabviewInterface settings and function calls\r\nclass LabviewInterface(object):\r\n    # The program connects to a server through TCP.\r\n    # The server hosts the LabView program that controls the LabviewInterface.\r\n    # The program sends control information to the LabviewInterface server, which is sent as a JSON cluster\r\n    # The cluster is rebuilt into a control on the server and the command executed.\r\n\r\n    ####################################################################\r\n    #                        Class Variables                           #\r\n    #    These set the LabviewInterface controller fields and controls #\r\n    #     and manage toggles used in the python program                #\r\n    ####################################################################\r\n\r\n    # TCP Settings\r\n    server = 0\r\n    incoming = 0  # receive thread\r\n\r\n    # TCP Control Commands\r\n    isQuit = False  # quit application\r\n    isFlush = False  # flush queue\r\n\r\n    # Controller Settings\r\n    # Fields\r\n    isDisplayingPosition = False\r\n    position = []  # position of XYTable\r\n    pressure = 0\r\n    rotation = 0\r\n    home = [0, 0]  # home co-ordinates\r\n    dv = 10  # calibration velocity\r\n    maxdv = 10  # max velocity\r\n    maxAcc = 100  # max acceleration\r\n    maxDcc = 100  # max deceleration\r\n    
limit = 300  # limit of both axes to be checked\r\n\r\n    # Controls\r\n    # Drive and Calibration settings\r\n    toCalibrate = False\r\n    toHome = False\r\n\r\n    # Play/Stop Controller\r\n    play = True\r\n\r\n    # motion select\r\n    # 0 = Linear Sequence Move (DEFAULT)\r\n    # 1 = Velocity Move\r\n    # 2 = Single Point\r\n    # 3 = Joystick\r\n    motion_select = 0\r\n\r\n    # linear sequence settings\r\n    points_input = [[]]\r\n    repeat = False\r\n    reverse = False\r\n    # Sequence Mode Setting\r\n    # 0 = Relative to Start of Sequence\r\n    # 1 = Relative to Limits (absolute) - (DEFAULT)\r\n    # 2 = Relative to Home Position\r\n    # 3 = Relative to Previous Points in Seq\r\n    sequence_mode = 1\r\n\r\n    # velocity move\r\n    xDv = 0\r\n    yDv = 0\r\n\r\n    # single point settings\r\n    # single_mode setting\r\n    # 0 = Relative to Current Position\r\n    # 1 = Relative to Limits(absolute) DEFAULT\r\n    # 2 = Relative to Home Position\r\n    single_mode = 1\r\n    position = []\r\n\r\n    ####################################################################\r\n    #                          Class Methods                           #\r\n    ####################################################################\r\n\r\n    # connect to server\r\n    # @param ip address str\r\n    # @param port number int\r\n    @staticmethod\r\n    def connect(ip=None, port=None):\r\n        LabviewInterface.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n        try:\r\n            if ip is None and port is None:\r\n                LabviewInterface.server.connect(('192.168.0.4', 10000))\r\n            else:\r\n                LabviewInterface.server.connect((ip, port))\r\n            LabviewInterface.incoming = IncomingThread()\r\n            LabviewInterface.incoming.start()\r\n            LabviewInterface.isQuit = False\r\n            LabviewInterface.isFlush = False\r\n        except:\r\n            print(\r\n                'Problem connecting to server. Please make sure the server is on, the specified ip address and port number are correct, and that you can reach the server through the network i.e. traceroute')\r\n\r\n    # disconnect once finished\r\n    @staticmethod\r\n    def disconnect():\r\n        # Send end signal and stop threads\r\n        LabviewInterface.end()\r\n        LabviewInterface.incoming.stop()\r\n\r\n    # send data to the connected server\r\n    @staticmethod\r\n    def sendData(data):\r\n        # convert string to bytes and send\r\n        byteArray = bytearray(data, \"utf-8\")\r\n        try:\r\n            LabviewInterface.server.sendall(byteArray)\r\n        except AttributeError:\r\n            print('Please connect to the server before sending data')\r\n\r\n    # build LabviewInterface controller cluster as JSON string\r\n    @staticmethod\r\n    def toJSON():\r\n        return '{\"Play\":{\"Play\":' + json.dumps(LabviewInterface.play) + ',\"Motion Select\":' + json.dumps(\r\n            LabviewInterface.motion_select) + '},\"Home/Calibration Settings\":{\"HomeY\":' + str(\r\n            LabviewInterface.home[1]) + ',\"Calibration/Home Velocity\":' + str(\r\n            LabviewInterface.dv) + ',\"HomeX\":' + str(\r\n            LabviewInterface.home[0]) + '},\"Drive and Calibrate/Home controls\":{\"Go to home\":' + json.dumps(\r\n            LabviewInterface.toHome) + ',\"Calibrate\":' + json.dumps(\r\n            LabviewInterface.toCalibrate) + ',\"Clear Faults\":true,\"Enable\":true},\"General Motion Settings\":{\"Max Velocity\":' + str(\r\n            LabviewInterface.maxdv) + ',\"Max Acceleration\":' + str(\r\n            LabviewInterface.maxAcc) + ',\"Max Decceleration\":' + str(\r\n            LabviewInterface.maxDcc) + ',\"Max Accel Jerk\":0,\"Max Deccel Jerk\":0},\"Linear Sequance Settings\":{\"Points Input\":' + json.dumps(\r\n            LabviewInterface.points_input) + ',\"Transition Mode\":0,\"Reverse\":' + json.dumps(\r\n            LabviewInterface.reverse) + ',\"Sequance Move Mode\":' + json.dumps(\r\n            LabviewInterface.sequence_mode) + ',\"Velocity profile smoothing\":0.5,\"Repeat\":' + json.dumps(\r\n            LabviewInterface.repeat) + ',\"Delay between moves\":0},\"Velocity Move Settings\":{\"On hitting limit\":1,\"X 
Velocity\":' + json.dumps(\n LabviewInterface.xDv) + ',\"Y Velocity\":' + json.dumps(\n LabviewInterface.yDv) + '},\"Single point Settings\":{\"Go to Position\":' + json.dumps(\n LabviewInterface.position) + ',\"Move Mode\":' + json.dumps(\n LabviewInterface.single_mode) + '},\"Quit\":' + json.dumps(\n LabviewInterface.isQuit) + ',\"Flush\":' + json.dumps(LabviewInterface.isFlush) + '}'\n\n # send sequence to LabviewInterface\n @staticmethod\n def sendCluster():\n LabviewInterface.sendData(LabviewInterface.toJSON())\n # set controller back to default ready for next instruction\n LabviewInterface.setDefault()\n\n # set all controller controls back to default\n @staticmethod\n def setDefault():\n LabviewInterface.toCalibrate = False\n LabviewInterface.toHome = False\n LabviewInterface.play = True\n LabviewInterface.motion_select = 0\n LabviewInterface.points_input = [[]]\n LabviewInterface.repeat = False\n LabviewInterface.reverse = False\n LabviewInterface.sequence_mode = 1\n LabviewInterface.xDv = 0\n LabviewInterface.yDv = 0\n LabviewInterface.single_mode = 1\n LabviewInterface.position = []\n\n # Set home co-ordinates i.e. [10, 10] - 0 by default\n @staticmethod\n def setHome(point):\n LabviewInterface.home = point\n\n # Toggle x,y position of Table to console\n @staticmethod\n def toggleDisplay():\n if LabviewInterface.isDisplayingPosition:\n LabviewInterface.isDisplayingPosition = False\n else:\n LabviewInterface.isDisplayingPosition = True\n\n # stop current LabviewInterface sequence\n @staticmethod\n def stop():\n LabviewInterface.play = False\n LabviewInterface.sendCluster()\n\n # resume from next LabviewInterface sequence\n @staticmethod\n def resume():\n LabviewInterface.play = True\n LabviewInterface.sendCluster()\n\n # quit LabviewInterface server\n @staticmethod\n def end():\n LabviewInterface.isQuit = True\n LabviewInterface.isFlush = True\n LabviewInterface.play = False\n LabviewInterface.sendCluster()\n\n # flush current sequence queue\n @staticmethod\n def flush():\n LabviewInterface.isFlush = True\n LabviewInterface.sendCluster()\n LabviewInterface.isFlush = False\n\n # set calibrate velocity\n @staticmethod\n def setSpeed(dv):\n LabviewInterface.dv = dv\n\n # Calibrate LabviewInterface\n # Set calibrate to true and all others to default/false\n @staticmethod\n def calibrate():\n LabviewInterface.toCalibrate = True\n LabviewInterface.play = True\n LabviewInterface.motion_select = 1\n LabviewInterface.sendCluster()\n\n # Leave controller in play state to allow joystick input\n @staticmethod\n def joystick():\n LabviewInterface.play = True\n LabviewInterface.motion_select = 3\n LabviewInterface.sendCluster()\n\n # move LabviewInterface using Velocity Move\n @staticmethod\n def move(dx, dy):\n LabviewInterface.xDv = dx\n LabviewInterface.yDv = dy\n LabviewInterface.play = True\n LabviewInterface.motion_select = 1\n LabviewInterface.sendCluster()\n\n # move LabviewInterface relative to current position by x, y\n @staticmethod\n def moveBy(vector):\n LabviewInterface.position = vector\n LabviewInterface.single_mode = 0\n LabviewInterface.play = True\n LabviewInterface.motion_select = 2\n LabviewInterface.sendCluster()\n\n # move to home\n @staticmethod\n def moveHome():\n LabviewInterface.moveToPosition(LabviewInterface.home)\n\n # move to fixed position\n # # @param point = 1D array\n # # @param single_mode int\n # 0 = Relative to Current Position\n # 1 = Relative to Limits(absolute)\n # 2 = Relative to Home Position\n @staticmethod\n def moveToPosition(point, mode=None):\n 
LabviewInterface.play = True\n LabviewInterface.motion_select = 2\n LabviewInterface.position = point\n if mode is not None:\n LabviewInterface.single_mode = mode\n else:\n LabviewInterface.single_mode = 1\n LabviewInterface.sendCluster()\n\n # move to waypoints\n # @param waypoints 2D array\n # @param repeat bool\n # @param reverse bool\n # @param sequence_mode int\n # 0 = Relative to Start of Sequence\n # 1 = Relative to Limits (absolute) - Default\n # 2 = Relative to Home Position\n # 3 = Relative to Previous Points in Seq\n @staticmethod\n def moveToWayPoints(points, sequence_mode=None, repeat=None, reverse=None):\n # Set Defaults\n LabviewInterface.points_input = points\n LabviewInterface.play = True\n LabviewInterface.motion_select = 0\n LabviewInterface.sequence_mode = 1\n LabviewInterface.repeat = False\n LabviewInterface.reverse = False\n if sequence_mode is not None:\n LabviewInterface.sequence_mode = sequence_mode\n if repeat is not None:\n LabviewInterface.repeat = repeat\n if reverse is not None:\n LabviewInterface.reverse = reverse\n LabviewInterface.sendCluster()\n\n # TODO: add pressure interface\n @staticmethod\n def setPressure(pressure):\n pass\n\n # TODO: add rotation interface\n @staticmethod\n def setRotation(rotation):\n pass\n\n\n##################################################\n# INCOMING TCP SETTINGS #\n##################################################\n\nclass IncomingThread(Thread):\n def run(self):\n self.stillChatting = True\n while self.stillChatting: # wait for more incoming resources\n transmission = LabviewInterface.server.recv(1024).decode()\n lines = transmission.split('\\n')[:-1]\n i = 0\n while i < len(lines):\n command = lines[i].split()[0] # first keyword\n data = lines[i][len(command) + 1:] # remaining information\n j = json.loads(command + data)\n mypoints = j['position']\n if LabviewInterface.isDisplayingPosition:\n for point in mypoints:\n if len(point) > 1:\n print('(' + str(point[0]) + ',' + str(point[1]) + ')')\n i += 1\n\n def stop(self):\n self.stillChatting = False\n\n\ndef generateSine(amplitude):\n # sampling rate\n fs = 44100\n # all points in range\n t = np.arange(-0.0024, .004, 1.0 / fs)\n # frequency\n f0 = 1000\n # phases\n phi = np.pi / 2\n # amplitude\n A = amplitude\n # function\n return (A * np.sin(2 * np.pi * f0 * t + phi))\n\n\ndef sineByWayPoints(amplitude):\n LabviewInterface.setHome([1, amplitude + 10])\n x = generateSine(amplitude)\n i = 0\n sine = []\n for value in x:\n point = [0, 0]\n point[0] = i\n point[1] = float(\"{0:.1f}\".format(value))\n sine.append(point)\n i += 1\n LabviewInterface.moveToWayPoints(sine, 2)\n\n\ndef sineDemo(amplitude):\n LabviewInterface.connect()\n time.sleep(2)\n LabviewInterface.calibrate()\n LabviewInterface.setHome([1, amplitude + 10])\n sineByWayPoints(amplitude)\n\n\ndef testArrayLimit(size):\n sine = []\n for x in range(size):\n point = []\n if (x % 2 == 0):\n point.append(105)\n point.append(105)\n sine.append(point)\n else:\n point.append(100)\n point.append(100)\n sine.append(point)\n LabviewInterface.moveToWayPoints(sine)\n\n\ndef testSerialLimit(size):\n for x in range(size):\n if x > 0:\n point = []\n point.append(x % 300)\n point.append(x % 300)\n LabviewInterface.moveToPosition(point)\n","sub_path":"src/labview_socket_interface.py","file_name":"labview_socket_interface.py","file_ext":"py","file_size_in_byte":13342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"616248866","text":"from pymongo import MongoClient\n\nclass 
MongoConnect():\n\n    def insert(self, obj):\n        try:\n            conexao = MongoClient('localhost', 27017)\n            banco = conexao.n2linguagemprogramacao\n            musica = banco.musica\n            id = musica.insert_one(obj).inserted_id\n\n        except Exception as e:\n            print(obj)\n            print(e)","sub_path":"atividade02/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"73398934","text":"import paho.mqtt.client as mqtt\nimport os\nimport random\nimport json\nimport time\nimport csv\n\n__edge_device_connected_with_cloud = True\n__edge_device_connected_with_iot_devices = False\n\ndef on_connect(client, user__data, flags, rc):\n    \"\"\" Prints at successful connection\n    Define the connect callback implementation.\n\n    Expected signature for MQTT v3.1 and v3.1.1 is:\n        connect_callback(client, userdata, flags, rc, properties=None)\n\n    and for MQTT v5.0:\n        connect_callback(client, userdata, flags, reasonCode, properties)\n\n    client:     the client instance for this callback\n    userdata:   the private user data as set in Client() or userdata_set()\n    flags:      response flags sent by the broker\n    rc:         the connection result\n    reasonCode: the MQTT v5.0 reason code: an instance of the ReasonCode class.\n                ReasonCode may be compared to an integer.\n    properties: the MQTT v5.0 properties returned from the broker.  An instance\n                of the Properties class.\n                For MQTT v3.1 and v3.1.1 properties is not provided but for compatibility\n                with MQTT v5.0, we recommend adding properties=None.\n\n    flags is a dict that contains response flags from the broker:\n        flags['session present'] - this flag is useful for clients that are\n            using clean session set to 0 only. If a client with clean\n            session=0, that reconnects to a broker that it has previously\n            connected to, this flag indicates whether the broker still has the\n            session information for the client. If 1, the session still exists.\n\n    The value of rc indicates success or not:\n        0: Connection successful\n        1: Connection refused - incorrect protocol version\n        2: Connection refused - invalid client identifier\n        3: Connection refused - server unavailable\n        4: Connection refused - bad username or password\n        5: Connection refused - not authorised\n        6-255: Currently unused.\n    \"\"\" \n    print(\"Connected with result code \" + str(rc))\n\ndef on_message(client, user__data, msg):\n    \"\"\" Gets triggered when a subscribed topic receives a message.\n    The message will come from the edge device and will change the values and states \n    of the machine. 
\n    Define the message received callback implementation.\n\n    Expected signature is:\n        on_message_callback(client, userdata, message)\n\n    client: the client instance for this callback\n    userdata: the private user data as set in Client() or userdata_set()\n    message: an instance of MQTTMessage.\n    This is a class with members topic, payload, qos, retain.\n    \"\"\"\n    global __edge_device_connected_with_cloud\n    global __edge_device_connected_with_iot_devices\n    \n    # Prints the topic and message\n    print(msg.topic + \" \" + str(msg.payload))\n\n    mqtt_msg = json.loads(msg.payload)\n\n    if msg.topic == \"cloud/\":\n        if bool(mqtt_msg[\"get_sensor_data\"]) or bool(mqtt_msg[\"publish_update\"]):\n            __edge_device_connected_with_cloud = False\n    \n    if msg.topic == \"edge-device/\":\n        __edge_device_connected_with_iot_devices = mqtt_msg[\"edge_device_connected_with_iot_devices\"]\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n# Connect before subscribing: paho-mqtt does not register subscriptions made while disconnected\nclient.connect(\"localhost\", 1883, 60)\nclient.subscribe(\"cloud/#\")\nclient.subscribe(\"edge-device/#\")\nclient.loop_start()\n\nwhile True:\n    \n    print(\"__edge_device_connected_with_cloud: \" + str(__edge_device_connected_with_cloud)+ \"\\n__edge_device_connected_with_iot_devices: \"+ str(__edge_device_connected_with_iot_devices))\n\n    input_action = int(input(\"1)Connect edge-device to iot devices \\n2)Connect edge-device to cloud \\n3)Disconnect edge-device from iot-devices \\n4)Disconnect edge-device from cloud \\ntype number:\"))\n\n    if input_action == 1:\n        __edge_device_connected_with_iot_devices = True\n\n    elif input_action == 2:\n        __edge_device_connected_with_cloud = True\n    \n    elif input_action == 3:\n        __edge_device_connected_with_iot_devices = False\n    \n    elif input_action == 4:\n        __edge_device_connected_with_cloud = False\n    \n    # Publish values of the iot_devices to edge device\n    if input_action > 0 and input_action < 5:\n        client.publish(\"simulation/\", json.dumps({\"edge_device_connected_with_iot_devices\": __edge_device_connected_with_iot_devices, \"edge_device_connected_with_cloud\": __edge_device_connected_with_cloud}))","sub_path":"simulation_input.py","file_name":"simulation_input.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
{"seq_id":"465130710","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ### Bits and pieces for Shop Env Monitor\n# \n# Source: thingspeak\n\n# In[183]:\n\n\nimport json\nimport thingspeak as thingspeak\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport urllib.request\n\ntoday = datetime.datetime.utcnow().strftime('%Y-%m-%dT00:00:00Z')\nyesterday = (datetime.datetime.utcnow()-datetime.timedelta(days=1)).strftime('%Y-%m-%dT00:00:00Z')\n\nsbell = thingspeak.TSAccount('https://api.thingspeak.com/','869L0PHK8GKAIIYQ')\njsonout = json.loads(\"{}\")\n\njsonout.update({'datetime':datetime.datetime.utcnow().timestamp()})\njsonout.update({ \"messages\": \"Time is when last downloaded, not time of last measurement.\"})\njsonout.update({ \"days\": datetime.datetime.utcnow().day})\n\n### cellar\nsbellc = thingspeak.TSChannel(acc_host_addr='https://api.thingspeak.com/',api_key='QS3DYISJPLE5EQCW'\n,ch_id=1037066)\ndt11 = sbellc.get_a_channel_field_feed(['field1','field2'],parameters={'minutes':2})\ndt11_df = pd.DataFrame(dt11['feeds'])\ndt11_df = dt11_df.set_index(pd.DatetimeIndex(dt11_df['created_at']))\ndt11_df['field1'] = dt11_df['field1'].astype('float64')\ndt11_df['field2'] = 
dt11_df['field2'].astype('float64')\ndt11_df.rename(columns = {'field1':'temperature','field2':'humidity'},inplace = True)\n\nif dt11_df['temperature'].mean() <= 10:\n    jsonout.update({\"Temp_Cellar_Alert\": \"alert alert-info\"})\nelif (dt11_df['temperature'].mean() > 10) and (dt11_df['temperature'].mean() < 20):\n    jsonout.update({\"Temp_Cellar_Alert\": \"alert alert-warning\"})\nelif (dt11_df['temperature'].mean() >= 20):\n    jsonout.update({\"Temp_Cellar_Alert\": \"alert alert-danger\"})\nelse:\n    jsonout.update({\"Temp_Cellar_Alert\": \"\"})\n\njsonout.update({\"Temp_Cellar\": dt11_df['temperature'].mean()}) \n###\n\n### shop\nsbellc = thingspeak.TSChannel(acc_host_addr='https://api.thingspeak.com/',api_key='QS3DYISJPLE5EQCW'\n,ch_id=843357)\nbmp = sbellc.get_a_channel_field_feed(['field3','field4'],parameters={'minutes':15})\nbmp_df = pd.DataFrame(bmp['feeds'])\nbmp_df = bmp_df.set_index(pd.DatetimeIndex(bmp_df['created_at']))\nbmp_df['field3'] = bmp_df['field3'].astype('float64')\nbmp_df['field4'] = bmp_df['field4'].astype('float64')\nbmp_df.rename(columns = {'field3':'temperature','field4':'pressure'},inplace = True)\n\nif bmp_df['temperature'].mean() <= 10:\n    jsonout.update({\"Temp_Shop_Alert\": \"alert alert-info\"})\nelif (bmp_df['temperature'].mean() > 10) and (bmp_df['temperature'].mean() < 20):\n    jsonout.update({\"Temp_Shop_Alert\": \"alert alert-warning\"})\nelif bmp_df['temperature'].mean() >= 20:\n    jsonout.update({\"Temp_Shop_Alert\": \"alert alert-danger\"})\nelse:\n    jsonout.update({\"Temp_Shop_Alert\": \"\"})\n\njsonout.update({\"Temp_Shop\": bmp_df['temperature'].mean()}) \n\n \n###\n\n### Tysons Room\nsbellc = thingspeak.TSChannel(acc_host_addr='https://api.thingspeak.com/',api_key='QS3DYISJPLE5EQCW'\n,ch_id=1027974)\ntmp36 = sbellc.get_a_channel_field_feed('field1',parameters={'minutes':2})\ntmp36_df = pd.DataFrame(tmp36['feeds'])\ntmp36_df = tmp36_df.set_index(pd.DatetimeIndex(tmp36_df['created_at']))\ntmp36_df['field1'] = tmp36_df['field1'].astype('float64')\ntmp36_df.rename(columns = {'field1':'temperature'},inplace = True)\n \n\nif tmp36_df['temperature'].mean() <= 10:\n    jsonout.update({\"Temp_Tyson_Alert\": \"alert alert-info\"})\nelif (tmp36_df['temperature'].mean() > 10) and (tmp36_df['temperature'].mean() < 20):\n    jsonout.update({\"Temp_Tyson_Alert\": \"alert alert-warning\"})\nelif tmp36_df['temperature'].mean() >= 20:\n    jsonout.update({\"Temp_Tyson_Alert\": \"alert alert-danger\"})\nelse:\n    jsonout.update({\"Temp_Tyson_Alert\": \"\"})\n\njsonout.update({\"Temp_Tyson\": tmp36_df['temperature'].mean()}) \n \n \n### DuckBarn\nsbellc = thingspeak.TSChannel(acc_host_addr='https://api.thingspeak.com/',api_key='QS3DYISJPLE5EQCW'\n,ch_id=1047747)\ntest = sbellc.get_a_channel_field_feed(['field1','field2','field3'],parameters={'minutes':1})\ntest_df = pd.DataFrame(test['feeds'])\ntest_df = test_df.set_index(pd.DatetimeIndex(test_df['created_at']))\ntest_df['field1'] = test_df['field1'].astype('float64')\ntest_df['field2'] = test_df['field2'].astype('float64')\ntest_df['field3'] = test_df['field3'].astype('float64')\ntest_df.rename(columns = {'field1':'temperature','field2':'temperature_2','field3':'humidity'},inplace = True)\n\nif test_df['temperature'].mean() <= 10:\n    jsonout.update({\"Temp_DuckBarn_interior_Alert\": \"alert alert-info\"})\nelif (test_df['temperature'].mean() > 10) and (test_df['temperature'].mean() < 20):\n    jsonout.update({\"Temp_DuckBarn_interior_Alert\": \"alert alert-warning\"})\nelif test_df['temperature'].mean() >= 20:\n    
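# 20 or above is tagged with the \"alert alert-danger\" class (presumably Bootstrap styling for the dashboard page)\n    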
jsonout.update({\"Temp_DuckBarn_interior_Alert\": \"alert alert-danger\"})\nelse:\n jsonout.update({\"Temp_DuckBarn_interior_Alert\": \"\"})\n\njsonout.update({\"Temp_DuckBarn_interior\": test_df['temperature'].mean()}) \n\nif test_df['temperature_2'].mean() <= 10:\n jsonout.update({\"Temp_DuckBarn_exterior_Alert\": \"alert alert-info\"})\nelif (test_df['temperature_2'].mean() > 10) and (test_df['temperature_2'].mean() < 20):\n jsonout.update({\"Temp_DuckBarn_exterior_Alert\": \"alert alert-warning\"})\nelif test_df['temperature_2'].mean() >= 20:\n jsonout.update({\"Temp_DuckBarn_exterior_Alert\": \"alert alert-danger\"})\nelse:\n jsonout.update({\"Temp_DuckBarn_exterior_Alert\": \"\"})\n\njsonout.update({\"Temp_DuckBarn_exterior\": test_df['temperature_2'].mean()}) \n \n \nwith open('MoonFlowerMonitor.json', 'w') as my_data_file:\n my_data_file.write(json.dumps(jsonout,indent=0))\n\n\n","sub_path":"swbell/ThingSpeak_Wx/python_src/FarmEnvMonitor.py","file_name":"FarmEnvMonitor.py","file_ext":"py","file_size_in_byte":5577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"242670442","text":"def solution(heights):\n answer = []\n for i, height in enumerate(heights):\n for j in range(i, -1, -1):\n if heights[j] > height:\n answer.append(j + 1)\n break\n if j == 0:\n answer.append(-1)\n\n return answer\n\n","sub_path":"programmers/lessons_42588/top.py","file_name":"top.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"624161520","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport socket\nimport multiprocessing\nimport threading\nimport os\nimport logging\nimport urllib\nimport argparse\nfrom datetime import datetime\n\nALLOWED_METHODS = (\"HEAD\", \"GET\")\nBUFSIZE = 1024\nSOCKET_TIMEOUT = 5\nSOMAXCONN = 128\n\nOK = 200\nBAD_REQUEST = 400\nFORBIDDEN = 403\nNOT_FOUND = 404\nNOT_ALLOWED = 405\nREQUEST_TIMEOUT = 408\nINTERNAL_ERROR = 500\n\nCODES = {\n OK: \"OK\",\n BAD_REQUEST: \"Bad request\",\n FORBIDDEN: \"Forbidden\",\n NOT_FOUND: \"Not Found\",\n NOT_ALLOWED: \"Method Not Allowed\",\n REQUEST_TIMEOUT: \"Request Timeout\",\n INTERNAL_ERROR: \"Internal Server Error\",\n}\n\nCONTENT_TYPES = {\n \"html\": \"text/html\",\n \"css\": \"text/css\",\n \"js\": \"application/javascript\",\n \"jpg\": \"image/jpeg\",\n \"jpeg\": \"image/jpeg\",\n \"png\": \"image/png\",\n \"gif\": \"image/gif\",\n \"swf\": \"application/x-shockwave-flash\",\n \"txt\": \"text/plain\"\n}\n\n\nclass OTUServer:\n def __init__(self, bind_ip, bind_port, rootdir, backlog, request_handler):\n self.server = None\n self.addr = bind_ip\n self.port = bind_port\n self.rootdir = rootdir\n self.somaxconn = backlog\n self.request_handler = request_handler\n self.make_socket()\n\n def make_socket(self):\n try:\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n self.server.bind((self.addr, self.port))\n self.server.listen(self.somaxconn)\n except socket.error as e:\n raise RuntimeError(e)\n\n def serve_forever(self):\n try:\n while True:\n client_sock, address = self.server.accept()\n client_sock.settimeout(SOCKET_TIMEOUT)\n logging.debug(\"Connection from %s:%s\" %\n (address[0], address[1]))\n request_handler = threading.Thread(\n target=self.request_handler,\n args=(self.rootdir, client_sock, address)\n )\n request_handler.start()\n logging.debug(\"Request handler running: %s, %s\" %\n 
(multiprocessing.current_process().name,\n                           request_handler.name))\n        except:\n            self.stop_server()\n\n    def stop_server(self):\n        logging.debug(\"Terminating. PID: %s\" %\n                      multiprocessing.current_process().pid)\n        # Close the listening socket so the worker process can exit cleanly\n        self.server.close()\n\n\nclass HTTPRequestHandler(object):\n    index = \"index.html\"\n\n    def __init__(self, rootdir, client_socket, client_address):\n        self.rootdir = os.path.realpath(rootdir)\n        self.client_socket = client_socket\n        self.client_address = client_address\n        self.request = \"\"\n        self.path = \"\"\n        self.code = None\n        self.method = None\n        self.http_version = \"HTTP/1.1\"\n        self.headers = {\n            \"Server\": \"OTUServer\"\n        }\n        self.body = None\n        self.handle_request()\n\n    def handle_request(self):\n        request = self.recv_all()\n        if not self.code:\n            self.code = self.parse_request(request)\n        self.build_response()\n        self.send_response()\n        logging.debug(\"%s:%s - [%s] '%s' - %d\" % (\n            self.client_address[0],\n            self.client_address[1],\n            self.headers[\"Date\"],\n            self.request,\n            self.code))\n        self.client_socket.close()\n\n    def recv_all(self):\n        req = \"\"\n        try:\n            while True:\n                req_part = self.client_socket.recv(BUFSIZE)\n                if not req_part:\n                    return None\n                req += req_part.decode()\n                if \"\\r\\n\\r\\n\" in req_part:\n                    break\n        except socket.timeout:\n            self.code = REQUEST_TIMEOUT\n            req = \"\"\n        return req\n\n    def parse_request(self, req):\n        try:\n            self.request = req.splitlines()[0]\n            method, uri, self.http_version = self.request.split()\n            self.method = method.upper()\n        except Exception:\n            return BAD_REQUEST\n        if method not in ALLOWED_METHODS:\n            return NOT_ALLOWED\n        uri = urllib.unquote_plus(uri)\n        path_parts = None\n        if \"?\" in uri:\n            path_parts = uri.split(\"?\")\n        elif \"#\" in uri:\n            path_parts = uri.split(\"#\")\n        if path_parts:\n            uri = path_parts[0]\n        self.path = os.path.realpath(self.rootdir + uri)\n        return self.process_uri()\n\n    def process_uri(self):\n        prefix = os.path.commonprefix([self.rootdir, self.path])\n        if prefix != self.rootdir:\n            return FORBIDDEN\n        if os.path.isdir(self.path):\n            self.path = os.path.join(self.path, self.index)\n        if not os.path.exists(self.path):\n            return NOT_FOUND\n        fn_parts = self.path.rsplit(\".\", 1)\n        if len(fn_parts) == 2 and fn_parts[1] in CONTENT_TYPES:\n            self.set_header(\"Content-Type\", CONTENT_TYPES[fn_parts[1]])\n        self.set_header(\"Content-Length\", os.path.getsize(self.path))\n        return OK\n\n    def set_header(self, hdr, value):\n        if hdr != \"Date\":\n            self.headers[hdr] = value\n        else:\n            now = datetime.utcnow()\n            self.headers[hdr] = now.strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\n\n    def set_body(self):\n        try:\n            with open(self.path, mode=\"rb\") as f:\n                self.body = f.read(int(self.headers[\"Content-Length\"]))\n        except (IOError, OSError) as e:\n            self.code = INTERNAL_ERROR\n            self.set_header(\"Content-Length\", \"0\")\n            self.body = \"\"\n            logging.error(\"Unknown error %s, path %s\" % (e, self.path))\n\n    def build_response(self):\n        self.set_header(\"Date\", \"now\")\n        self.resp_header = \"%s %s %s\\r\\n\" % (\n            self.http_version,\n            self.code,\n            CODES[self.code]\n        )\n        for k, v in self.headers.items():\n            self.resp_header += \"%s: %s\\r\\n\" % (k, v)\n        self.resp_header += \"\\r\\n\"\n        if self.code == OK and self.method != \"HEAD\":\n            self.set_body()\n\n    def send_response(self):\n        self.client_socket.sendall(self.resp_header)\n        if self.body:\n            self.client_socket.sendall(self.body)\n\n\ndef main(args):\n    processes = []\n    try:\n        for i in range(int(args.workers)):\n            server = OTUServer(args.addr,\n                               args.port,\n                               args.rootdir,\n                               SOMAXCONN,\n                               HTTPRequestHandler)\n            p = 
multiprocessing.Process(target=server.serve_forever)\n processes.append(p)\n p.start()\n logging.debug(\"Starting worker %s. PID: %d, addr: %s, port: %d\" %\n (p.name, p.pid, args.addr, args.port))\n logging.info(\"Server at %s:%s started\" % (args.addr, args.port))\n for proc in processes:\n proc.join()\n except KeyboardInterrupt:\n logging.info(\"Server at %s:%s stopped\" % (args.addr, args.port))\n\n\nif __name__ == \"__main__\":\n arg_parser = argparse.ArgumentParser(description=\"Otus HTTP server\")\n arg_parser.add_argument(\"-i\", \"--addr\",\n default=\"127.0.0.1\", help=\"IP address\")\n arg_parser.add_argument(\"-p\", \"--port\", default=8008, help=\"TCP port\")\n arg_parser.add_argument(\"-r\", \"--rootdir\",\n default=\"./rootdir\", help=\"Root directory\")\n arg_parser.add_argument(\"-w\", \"--workers\", default=4, help=\"Workers count\")\n arg_parser.add_argument(\"-l\", \"--logfile\",\n default=None, help=\"Path to logfile\")\n arg_parser.add_argument(\"-d\", \"--debug\",\n action=\"store_true\", help=\"Debug mode\")\n args = arg_parser.parse_args()\n logging.basicConfig(filename=args.logfile,\n level=(logging.DEBUG if args.debug else logging.INFO),\n format=\"[%(asctime)s] %(levelname).1s %(message)s\",\n datefmt='%Y.%m.%d %H:%M:%S')\n try:\n main(args)\n except Exception as e:\n logging.error(\"Unexpected error: %s\" % e)\n","sub_path":"httpd.py","file_name":"httpd.py","file_ext":"py","file_size_in_byte":8254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"154742017","text":"from torch import optim\n\nimport settings\n\n\ndef define_optimizer_classifier(name, rate, model):\n if name not in settings.OPTIMIZERS:\n print(\"Optimizer unknown\")\n exit(1)\n\n if (name == \"ADAM\"):\n optimizer = optim.Adam(model.parameters(), lr=rate)\n elif (name == \"SGD\"):\n optimizer = optim.SGD(model.parameters(), lr=rate)\n elif (name == \"ADAM_TRANS\"):\n optimizer = optim.Adam(model.classifier.parameters(), lr=rate)\n elif (name == \"SGD_TRANS\"):\n optimizer = optim.SGD(model.classifier.parameters(), lr=rate)\n\n return optimizer \n\n\ndef define_optimizer_generator(name, rate, values):\n if name not in settings.OPTIMIZERS:\n print(\"Optimizer unknown\")\n exit(1)\n\n if (name == \"ADAM\"):\n optimizer = optim.Adam(values, lr=rate)\n\n return optimizer\n","sub_path":"mural/common/managers/optimizers.py","file_name":"optimizers.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"463769110","text":"# Experiment with scopes in Python.\n# Good reading: https://www.programiz.com/python-programming/global-local-nonlocal-variables\n\n# When you use a variable in a function, it's local in scope to the function.\nx = 12\n\n\ndef changeX():\n global x\n x = 99\n print(f\"1>>{x}\")\n\n\nchangeX()\n\n# This prints 12. What do we have to modify in changeX() to get it to print 99?\nprint(x)\n\n\n# This nested function has a similar problem.\n\ndef outer():\n # y = [120]\n # outer.y = 120\n y = 120\n\n def inner():\n #y[0] = 999\n # outer.y = 999\n nonlocal y\n y = 999\n inner()\n\n # This prints 120. What do we have to change in inner() to get it to print\n # 999? 
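(hint: each of the three options listed below works) 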
Google \"python nested function scope\".\n\n # 3 possible soulutions:\n # Create y as an interable in outer function and define y[0] in inner function\n # Specify variable name in outer function to mirror inner function on lines 24 and 29\n # Declare y as nonlocal in inner function, this only works with python3\n\n print(y)\n # print(outer.y)\n\n\nouter()\n","sub_path":"src/12_scopes.py","file_name":"12_scopes.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"433458682","text":"\n\"\"\"Write a Python program to store marks scored in subject “Fundamental of Data\n Structure” by N students in the class. Write functions to compute following:\n\ta) The average score of class\n\tb) Highest score and lowest score of class\n\tc) Count of students who were absent for the test\n\td) Display mark with highest frequency\"\"\"\n\nfrom array import *\n\ndef getdata():\n\tglobal N\n\tN=int(input(\"Enter No. of Students:\"))\n\tmarks=array('f',[]) #f= typecode i.e float\n\t\n\tfor i in range(1,N+1):\n\t\tprint(\"Enter marks of students of roll no.\",i,\":\",end='')\n\t\tmark=float(input())\n\t\tmarks.append(mark)\n\t\n\tglobal score #sum of marks\n\tscore=[-1]\n\tfor j in range(N):\n\t\tscore.append(marks[j])\ndef absent():\n\tcount=-1\n\tfor i in range(N+1):\n\t\tcount=count+1\n\t\tif(score[i]!=-1):\n\t\t\tbreak\n\tprint(\"No. of absent Students : \",i-1)\n\t\ndef avg():\n\tsum=0\n\tfor i in range(1,N+1):\n\t\tif(score[i]==-1):\n\t\t\tcontinue\n\t\tsum=sum+score[i]\n\tprint(\"Average Score Of class is : \",sum/N)\n\t\ndef minmax():\n\tfor i in range(N-1):\n\t\tfor j in range(N-i):\n\t\t\tif(score[j]>score[j+1]):\n\t\t\t\ttemp=score[j]\n\t\t\t\tscore[j]=score[j+1]\n\t\t\t\tscore[j+1]=temp\t\t\t\n\tprint(\"Max Score of class is : \",score[N])\t\n\t\n\tfor i in range(N+1):\n\t\tif(score[i]!=-1):\n\t\t\tbreak\n\tprint(\"Min Score of Class is : \",score[i])\n\t\n\t\ndef frequency():\n\tmax=1\n\tcount=1\n\tscore1=[]\n\t\n\tfor i in range(1,N+1):\n\t\tif(score[i]==-1):\n\t\t\tcontinue\n\t\telse:\n\t\t\tscore1.append(score[i])\n\t\n\tfor i in range(len(score1)):\n\t\tif (score1[i]==score1[i-1]):\n\t\t\tcount+=1\n\t\t\t\n\t\telse:\n\t\t\tif (count>max):\n\t\t\t\tmax=count\n\t\t\t\tprint(score1[i-1])\n\t\t\t\t\n\t\t\tcount=1\n\t\t\t\n\tif (count>max):\n\t\tmax=count\n\t\tprint(max)\n\t\tprint(\"Maximum repeated Score is : \",score1[i-1])\n\t\t\t\t\ngetdata()\navg()\nabsent()\nminmax()\nfrequency()\n","sub_path":"A2.py","file_name":"A2.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"519185264","text":"from collections import Counter, defaultdict\nimport sys\nimport math\n\n\nclass NBClassifier:\n \"\"\"\n Implements a naive Bayes classifier.\n\n Initialize it with a list of categories:\n >>> classifier = NBCLassifier([\"class1\", \"class2\"])\n\n Then train it with a list of documents. 
Documents are 2-tuples of the form:\n [(\"Text of document\", \"class1\"), ...]\n >>> classifier.train(list_of_docs)\n\n Then have it predict the category of a string:\n >>> classifier.predict(some_string)\n \"\"\"\n def __init__(self, categories):\n \"\"\"\n Args:\n categories: list of categories (strings)\n \"\"\"\n self.categories = categories\n self.Ndoc = 0\n self.catdict = defaultdict(list) # mapping from category to words in that category\n self.catcount = defaultdict(int) # mapping from category to num docs in that category\n self.V = set() # the vocabulary of words\n\n self.logprior = {}\n self.logliklihood = {}\n\n def train(self, documents):\n \"\"\"\n Args:\n documents: a list of tuples of strings: [(text, category), ..]\n \"\"\"\n self.Ndoc += len(documents)\n for doc in documents:\n text, cat = doc\n new_words = text.split()\n\n # update bag of words\n self.catdict[cat].extend(new_words)\n\n # update count of docs in cat\n self.catcount[cat] += 1\n\n # update vocabulary\n self.V.update(new_words)\n\n # update probability of word given cat\n for cat in self.categories:\n # update logprior\n Nc = self.catcount[cat]\n if Nc == 0:\n self.logprior[cat] = sys.float_info.min_exp\n else:\n self.logprior[cat] = math.log(Nc/self.Ndoc)\n\n # get and count the bag of words for all documents in cat\n word_counts = Counter(self.catdict[cat])\n totals = sum(word_counts.values())\n\n # calculate log liklihoods for each word given cat\n # (Uses +1 smoothing)\n for word in self.V:\n count = word_counts[word] + 1\n self.logliklihood[(word, cat)] = math.log(count/(totals+len(self.V)))\n\n def predict(self, text):\n \"\"\"\n Args:\n text: string to classify\n\n returns the most likely category.\n \"\"\"\n sums = []\n for cat in self.categories:\n sums.append(self.logprior[cat])\n for word in text.split():\n if word in self.V:\n #print(word, cat)\n #print(logliklihood.get((word, cat), sys.float_info.min_exp))\n sums[-1] += self.logliklihood.get((word, cat), 0)\n return self.categories[sums.index(max(sums))]\n","sub_path":"Code/nbclassifier_cb.py","file_name":"nbclassifier_cb.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"357004912","text":"import dns.resolver\nfrom dns import resolver\nimport json\nimport sys\nimport argparse\nimport logging\n\nfrom domutils import getparent\n\n\nclass Authority(object):\n def __init__(self, zone):\n self.zone = zone.lower()\n\n self.NSrecords = set()\n self.zoneItDepends = set()\n self.extZonesItDepends = set()\n self.zoneHasAtLeastOneNSinBailiwick = False\n self.allNSinBailiwick = False\n self.resolvable = None\n\n def addNS(self, ns):\n if ns[-1] != \".\":\n ns = ns + \".\"\n self.NSrecords.add(ns)\n\n def calcParentZones(self):\n inZoneCounter = 0\n for i in self.NSrecords:\n # if 'bio-bak' in i:\n # print('wait')\n parent = getparent(i)\n parentLenght = len(parent.split(\".\"))\n if parentLenght> 0:\n if parent[-1] != \".\":\n parent = parent + \".\"\n else:\n print(\"CHECK: parent has zero length\")\n isParentTLD = False\n # if len==2 , then is a TLD\n if parentLenght == 2:\n isParentTLD = True\n self.zoneHasAtLeastOneNSinBailiwick = True\n inZoneCounter = inZoneCounter + 1\n self.zoneItDepends.add(i)\n elif parentLenght > 2 and parent != self.zone:\n self.zoneItDepends.add(parent)\n else:\n self.zoneHasAtLeastOneNSinBailiwick = True\n self.zoneItDepends.add(parent)\n inZoneCounter = inZoneCounter + 1\n\n if inZoneCounter == len(self.NSrecords):\n 
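# every NS name was counted as in-bailiwick (a TLD parent or a name under the zone itself)\n            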
self.allNSinBailiwick = True\n\n self.extZonesItDepends = self.zoneItDepends\n if self.zone in self.extZonesItDepends:\n self.extZonesItDepends.remove(self.zone)\n\n return self.extZonesItDepends\n\n\ndef makeAuth(bugged):\n timeOutZones = dict()\n\n for ns, authoritySection in bugged.items():\n for zone, nsset in authoritySection.items():\n # If the zone is in the timeOutZones, get it, if not, generate an Authority object\n temp_zone = timeOutZones.get(zone, Authority(zone))\n\n # add NSes\n for i in nsset:\n temp_zone.addNS(i.lower())\n\n timeOutZones[zone.lower()] = temp_zone\n\n return timeOutZones\n\n\ndef getZonesWithoutInBailiwickServer(timeOutZones):\n timeOutWOBailick = dict()\n for k, v in timeOutZones.items():\n\n tempV = v.calcParentZones()\n timeOutZones[k] = v\n # print(\"only add zones with ALL NSes all of bailiwkc\")\n\n # print(str(v.zoneHasAtLeastOneNSinBailiwick)+ \",\" + str(v.allNSinBailiwick))\n\n if not v.allNSinBailiwick:\n timeOutWOBailick[k] = v\n\n # if v.zoneHasAtLeastOneNSinBailiwick!=v.allNSinBailiwick:\n # print('wait here')\n return timeOutWOBailick\n\n\ndef figureParentRecords(domain):\n parentZone = getparent(domain)\n\n toBeRet = []\n\n localRes = resolver.Resolver()\n localRes.timeout = 5\n localRes.lifetime = 5\n answer = ''\n try:\n answer = localRes.resolve(parentZone, 'NS')\n except Exception as e:\n logging.error(f\"Getting NS for {domain} triggered exception {e}\")\n return 'NXDOMAIN'\n\n if answer != '':\n response = answer.response\n # print(type(response))\n rcode = response.rcode()\n\n # parent is valid\n\n if rcode == 0:\n try:\n localA = response.answer\n for k in localA:\n for addr in k.items:\n tempNS = ''\n try:\n tempNS = addr.target\n toBeRet.append(str(tempNS))\n except Exception as e:\n print(e)\n # print(type(e))\n except:\n print('no NS')\n pass;\n elif rcode == 3:\n print('does not exist')\n toBeRet.append(-1)\n\n return toBeRet\n\n\ndef getDeps(timeOutWOBailick):\n codep = dict()\n for zone, localAuth in timeOutWOBailick.items():\n tempDepzone = localAuth.zoneItDepends\n codep[zone] = tempDepzone\n\n return codep\n\n\ndef getAAAA(ns):\n localRes = resolver.Resolver()\n localRes.timeout = 5\n localRes.lifetime = 5\n address = []\n try:\n\n answer = localRes.resolve(ns, 'AAAA')\n response = answer.response\n # print(type(response))\n rcode = response.rcode()\n if rcode == 0:\n try:\n localA = response.answer\n for k in localA:\n for addr in k.items:\n address.append(str(addr))\n except:\n print('no A')\n pass;\n\n elif rcode == 3:\n address.append(-1)\n except Exception as e:\n print(e)\n\n return address\n\n\ndef getA(ns):\n localRes = resolver.Resolver()\n localRes.timeout = 5\n localRes.lifetime = 5\n address = []\n try:\n\n answer = localRes.resolve(ns, 'A')\n response = answer.response\n # print(type(response))\n rcode = response.rcode()\n if rcode == 0:\n try:\n localA = response.answer\n for k in localA:\n for addr in k.items:\n address.append(str(addr))\n except:\n print('no A')\n pass;\n\n elif rcode == 3:\n address.append(-1)\n except Exception as e:\n print(e)\n\n return address\n\n\ndef retrieveNSFromParent(fqdn, ipFromAuthServer):\n queryType = dns.rdatatype.NS\n\n try:\n ipFromAuthServer = ipFromAuthServer[0]\n except:\n print(\"the error is \" + str(ipFromAuthServer))\n query = dns.message.make_query(fqdn, queryType)\n\n # initialize var\n response = -1\n\n ret = dict()\n try:\n response = dns.query.udp(query, ipFromAuthServer, timeout=5)\n except Exception as e:\n print('stope here')\n response = \"NA\"\n\n if 
response != \"NA\":\n if len(response.answer) > 0:\n\n isCNAME = False\n cnameValue = \"\"\n\n for item in response.answer:\n namez = str(item.name).lower()\n if item.rdtype == 5:\n # cnameValue = str(item.name).lower()\n for singleI in item.items:\n tempV = str(singleI.target).lower()\n cnameValue = tempV\n\n if namez not in ret:\n tempL = []\n tempL.append(cnameValue)\n ret[namez] = tempL\n else:\n tempL = ret[namez]\n tempL.append(cnameValue)\n ret[namez] = tempL\n\n else:\n print('aint cname')\n\n return ret\n elif len(response.answer) == 0 and len(response.authority) > 0:\n rcode = response.rcode()\n\n if rcode == 0:\n\n for item in response.authority:\n if item.rdtype == 6:\n # print(\"has soa, all GOOD\")\n return 'SOA'\n elif item.rdtype == 2:\n\n for addr in item.items:\n\n namez = item.name\n namez = str(namez)\n\n if namez not in ret:\n tempL = []\n tempL.append(str(addr))\n ret[namez] = tempL\n else:\n tempL = ret[namez]\n tempL.append(str(addr))\n ret[namez] = tempL\n\n return ret\n elif rcode == 3:\n return 'NXDOMAIN'\n\n\ndef getNS2(parent):\n # get the parent\n\n toBeRet = []\n\n localRes = resolver.Resolver()\n localRes.timeout = 5\n localRes.lifetime = 5\n answer = ''\n try:\n\n answer = localRes.resolve(parent, 'NS')\n except Exception as e:\n if 'timed out' in str(e):\n toBeRet.append('TIMEOUT')\n return toBeRet\n if answer != '':\n\n response = answer.response\n # print(type(response))\n rcode = response.rcode()\n\n # parent is valid\n\n if rcode == 0:\n\n try:\n localA = response.answer\n for k in localA:\n for addr in k.items:\n tempNS = ''\n try:\n tempNS = addr.target\n toBeRet.append(str(tempNS))\n except Exception as e:\n print(e)\n\n # print(type(e))\n except:\n print('no NS')\n pass;\n\n elif rcode == 3:\n print('does not exist')\n toBeRet.append('NXDOMAIN')\n\n return toBeRet\n\n\ndef findParents(x):\n results = dict()\n\n '''\n 1. get soa - see if it resolves \n 1a. if it works, proceed to 2\n 1b. if soa does not work, then have to find the real parent recursively utnil get an answer\n 2. 
for the nmname is soa list, then ask this mname for all NS records of the parent zone\n '''\n soaRec = getSOA(x)\n\n # has soa\n if len(soaRec) == 0:\n logging.warning(f\"Domain {x} has no soa\")\n parentX = getparent(x)\n if parentX[-1] != \".\":\n localParent = parentX + \".\"\n else:\n localParent = parentX\n\n nsLocalParent = ''\n try:\n nsLocalParent = getNS(localParent)\n except Exception as e:\n print(\"failed to get NS from zone at findParents\" + str(x))\n print(e)\n print(type(e))\n return -1\n\n if nsLocalParent != '':\n # print(\"analyze here\")\n for k in nsLocalParent:\n tempA = getA(k)\n tempAAAA = getAAAA(k)\n\n gotAnswer = False\n\n if gotAnswer == False:\n if len(tempA) > 0:\n # query it\n\n resParent = retrieveNSFromParent(x, tempA)\n\n if isinstance(resParent, dict):\n # then got results\n gotAnswer = True\n tempAuth = Authority(x)\n for k, v in resParent.items():\n for singleNS in v:\n tempAuth.addNS(singleNS)\n tempAuth.calcParentZones()\n results[x] = tempAuth.zoneItDepends\n return results\n\n elif isinstance(resParent, str):\n if resParent == 'NXDOMAIN':\n return \"NXDOMAIN\"\n else:\n print(\"FAILED hon here too\")\n print(resParent)\n return resParent\n\n\n\n else:\n\n return nsLocalParent\n else:\n # the domain resolves, the soa record shows the first avail auth server.\n for k, v in soaRec.items():\n parentsK = ''\n try:\n parentsK = getNS2(k)\n except Exception as e:\n print(\"failed to get NS from zone that has soa on findParents \" + str(k))\n print(e)\n print(type(e))\n\n if parentsK != '':\n\n tempAuth = Authority(x)\n for singleNS in parentsK:\n tempAuth.addNS(singleNS)\n tempAuth.calcParentZones()\n results[x] = tempAuth.zoneItDepends\n return results\n\n else:\n print('FOUND SOA< but not NS, could not happen')\n return -2\n\n\ndef getDepZonesRecursive(x):\n '''\n this method gets the zones each NS depends on, and put them in a set or something\n the idea is to determine on what zones domain x depends on\n\n steps\n 1. try to get the NSes of the X. If it works, fine! add them and return a dict\n 2. 
If it fails, then recursively get the parent until someone responds with NS.\n And from that, get is NS records\n '''\n\n isOK = False\n NSworks = False\n results = dict()\n parentNS = ''\n try:\n parentNS = getNS2(x)\n if isinstance(parentNS, list):\n if len(parentNS) > 0:\n if parentNS[0] != 'TIMEOUT':\n isOK = True\n NSworks = True\n except Exception as e:\n print(\"failed to get NS from zone\" + str(x))\n print(e)\n print(type(e))\n\n # in case there wer errors, need to fix this\n if isOK == False:\n try:\n parentNSX = findParents(x)\n '''\n results from the method above\n * dict - it worked\n * -1: no soa\n * -2 : soa, but no ns (should never happen I guess)\n \n '''\n if isinstance(parentNSX, dict):\n isOK = True\n # then, convert it to list\n tempP = []\n for k, v in parentNSX.items():\n for singleNS in v:\n parentNS.append(singleNS)\n\n elif parentNSX == -1 or parentNSX == -2:\n isOK = False\n elif parentNS == 'NXDOMAIN':\n results[x] = 'NXDOMAIN'\n return results\n\n except Exception as e:\n print(\"failed to find parent NS from zone\" + str(x))\n print(e)\n print(type(e))\n\n # this is when you can retrive the NS of the parent; no biggie here, when the NS is reachable\n if isOK == True:\n tempAuth = Authority(x)\n for k in parentNS:\n tempAuth.addNS(k)\n tempAuth.calcParentZones()\n if NSworks == True:\n tempAuth.resolvable = True\n 'TODO: fix here, add support to cnnam and disregard '\n\n whatZones = tempAuth.zoneItDepends\n if len(whatZones) == 0:\n whatZones = ''\n results[x] = tempAuth\n return results\n else:\n\n return 'NXDOMAIN'\n\n\ndef getDepZones(x):\n # this code gets all the zones a certain zone depends\n # example, giovane-moura.nl depends on webreus.nl\n isOK = False\n results = dict()\n\n localNSes = getNS(x)\n timeOUtButNotFromParent = False\n\n if localNSes == \"TIMEOUT\" or localNSes == \"NXDOMAIN\":\n logging.info(f\"Domain {x}: NX or bailiwick?\")\n tempP = getparent(x)\n\n if tempP[-1] != '.':\n tempP = tempP + \".\"\n parentNS = getNS(tempP)\n\n # print(parentNS)\n timeOUtButNotFromParent = True\n if parentNS != 'TIMEOUT':\n for singleNS in parentNS:\n if isOK == False and singleNS not in results:\n tempA = getA(singleNS)\n if tempA != -1:\n tempNSParent = retrieveNSFromParent(x, tempA)\n # we only add domains here if they timeout\n if timeOUtButNotFromParent == True and isinstance(tempNSParent, dict):\n\n results[x] = tempNSParent\n # for key, v in tempNSParent.items():\n # if key not in results:\n\n # results[key]=v\n # else:\n # print('been tghere done that')\n\n isOK = True\n elif tempNSParent == 'SOA':\n isOK = True\n # do nothing domain is ok\n elif tempNSParent == 'NXDOMAIN':\n isOK = True\n return 'NXDOMAIN'\n else:\n print(\"Parent does no work,try to get soa\")\n # try get soa\n\n parentNS = getSOA(tempP)\n\n return 'BROKEN NS'\n return results\n elif len(localNSes) > 0:\n results[x] = localNSes\n return results\n\n\ndef getNS(parent):\n # get the parent\n\n toBeRet = []\n\n localRes = resolver.Resolver()\n localRes.timeout = 5\n localRes.lifetime = 5\n answer = ''\n try:\n\n answer = localRes.resolve(parent, 'NS')\n except Exception as e:\n print(e)\n\n print(type(e))\n return 'NXDOMAIN'\n\n if answer != '':\n\n response = answer.response\n # print(type(response))\n rcode = response.rcode()\n\n # parent is valid\n\n if rcode == 0:\n\n try:\n localA = response.answer\n for k in localA:\n for addr in k.items:\n tempNS = ''\n try:\n tempNS = addr.target\n toBeRet.append(str(tempNS))\n except Exception as e:\n print(e)\n\n # print(type(e))\n except:\n 
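# the answer section can be missing or malformed; treat it as having no NS records\n                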
print('no NS')\n pass;\n\n elif rcode == 3:\n print('does not exist')\n toBeRet.append(-1)\n\n return toBeRet\n\n\ndef getSOA(ns):\n localRes = resolver.Resolver()\n localRes.timeout = 5\n localRes.lifetime = 5\n answer = ''\n soa = dict()\n # try to get a SOA, if it fails return ERROR\n try:\n answer = localRes.resolve(ns, 'SOA')\n except Exception as e:\n print(e)\n if 'does not contain an answer' in str(e):\n tempDict = e.kwargs\n response = tempDict['response']\n tempSOA = []\n authZone = ''\n for k in response.authority:\n authZone = str(k.name)\n for singleItem in k.items:\n tempV = str(singleItem.mname)\n tempSOA.append(tempV)\n soa[authZone] = tempSOA\n return soa\n\n else:\n logging.info(f\"BROKEN SOA for {ns}\")\n return soa\n\n if answer != '':\n response = answer.response\n # print(type(response))\n rcode = response.rcode()\n if rcode == 0:\n return 0\n elif rcode == 3:\n return -1\n elif rcode == 2:\n return \"ERROR\"\n\n\ndef sortDeps(codependency, timeOutWOBailick):\n cyclicDependentZones = dict()\n cyclicDependentZones['notResolvable'] = dict()\n\n cyclicDependentZones['Resolvable'] = dict()\n\n clearedForNX = set()\n clearedZonesForOK = set()\n\n bailiwickZonesOK = set()\n domainsThatFailedBUtNotCyclicDependency = set()\n total = str(len(codependency))\n counter = 0\n for zone, extDepentZones in codependency.items():\n reverseDep = set()\n counter = counter + 1\n logging.info(f\"Analyzying {zone}. Domain {counter}/{total}\")\n\n # to be cyclic dependent, all zones here must from tempDepZone must point to zone\n\n for singleZone in extDepentZones:\n\n externalAgain = ''\n localDepZones = ''\n if singleZone in timeOutWOBailick:\n localDepZones = timeOutWOBailick[singleZone].zoneItDepends\n\n else:\n # externalAgain = getDepZones(singleZone)\n externalAgain = getDepZonesRecursive(singleZone)\n\n # if we get a dict\n if isinstance(externalAgain, dict):\n localDepZones = set()\n for ext2zone, dep2zone in externalAgain.items():\n if isinstance(dep2zone, str) == False:\n for singelDep2Zone in dep2zone.zoneItDepends:\n if dep2zone != '':\n localDepZones.add(singelDep2Zone.lower())\n\n elif externalAgain == \"NXDOMAIN\":\n clearedForNX.add(zone)\n elif externalAgain == \"OK\":\n clearedZonesForOK.add(zone)\n\n elif externalAgain == \"BROKEN\":\n print(\"broken NS\")\n\n # now, process localDepZone\n hasDiffZone = False\n for k in localDepZones:\n if k.lower() == zone.lower() and hasDiffZone == False:\n # domain is cyclic depednet\n if zone.lower() not in cyclicDependentZones:\n\n resolvable = timeOutWOBailick[zone.lower()].resolvable\n if resolvable == True:\n tempL = cyclicDependentZones['Resolvable']\n tempL[zone.lower()] = singleZone\n cyclicDependentZones['Resolvable'] = tempL\n else:\n tempL = cyclicDependentZones['notResolvable']\n tempL[zone.lower()] = singleZone\n cyclicDependentZones['notResolvable'] = tempL\n\n else:\n print('it shoudl not get here I guess')\n\n elif not hasDiffZone:\n # print(zone.lower() + ' is not cyclic dependent')\n domainsThatFailedBUtNotCyclicDependency.add(zone.lower())\n hasDiffZone = True\n elif hasDiffZone:\n pass\n\n return cyclicDependentZones\n\n\ndef sortDepsNew(timeOutWOBailick):\n \"\"\"\n codependency= list of all zones that the timeout NSes depend\n timeOutWOBailick =dictionary with Authority objects\n\n goal : iterate over codependency and return a dict like timeOutWithoutBailick\n\n \"\"\"\n\n # dict with the new crated Auth\n newAuth = dict()\n\n # to be returned\n cyclicDependentZones = dict()\n cyclicDependentZones['partialDep'] = 
dict()\n cyclicDependentZones['fullDep'] = dict()\n cyclicDependentZones['fullDepWithInzone'] = dict()\n\n # unique zones from all timeout domais\n zoneAndDeps = getDeps(timeOutWOBailick)\n\n # dict clear with zones with NX\n clearedForNX = []\n\n # OK zones now\n clearedZonesForOK = []\n\n # clear with multiple zones\n clearedZonesForMultipleZones = []\n\n # other failed domains\n domainsThatFailedBUtNotCyclicDependency = []\n\n total = str(len(zoneAndDeps))\n counter = 0\n\n for zone, extDepentZones in zoneAndDeps.items():\n reverseDep = set()\n counter = counter + 1\n print('analyzying ' + zone + \". Domain \" + str(counter) + \" from \" + total)\n\n # to be cyclic dependent, all zones here must from tempDepZone must point to zone\n if True:\n # evaluate every single zone they depend on\n for singleZone in extDepentZones:\n\n localDepZones = ''\n tempAuthDict = dict()\n if singleZone in timeOutWOBailick:\n\n tempAuthDict[singleZone] = timeOutWOBailick[singleZone]\n localDepZones = set()\n for ext2zone, dep2zone in tempAuthDict.items():\n if isinstance(dep2zone, str) == False:\n for singelDep2Zone in dep2zone.zoneItDepends:\n if dep2zone != '':\n localDepZones.add(singelDep2Zone.lower())\n\n else:\n\n tempAuthDict = getDepZonesRecursive(singleZone)\n\n # if we get a dict\n if isinstance(tempAuthDict, dict):\n localDepZones = set()\n for ext2zone, dep2zone in tempAuthDict.items():\n if isinstance(dep2zone, str) == False:\n for singelDep2Zone in dep2zone.zoneItDepends:\n if dep2zone != '':\n localDepZones.add(singelDep2Zone.lower())\n\n elif tempAuthDict == \"NXDOMAIN\":\n clearedForNX.append(zone)\n elif tempAuthDict == \"OK\":\n clearedZonesForOK.append(zone)\n\n elif tempAuthDict == \"BROKEN\":\n print(\"broken NS\")\n\n # add tempAuthDict to the new domains\n hasDiffZone = False\n if isinstance(tempAuthDict, dict):\n for k, v in tempAuthDict.items():\n v.calcParentZones()\n newAuth[k] = v\n\n if v.extZonesItDepends != None:\n hasDiffZone = True\n\n # now, process localDepZone\n if len(localDepZones) > 0:\n\n # there are two cateogries of codep: one fullY (1to1, then there's one only localDepzone)\n # full DEP\n if len(localDepZones) == 1:\n\n for k in localDepZones:\n\n # the error is here: must be not ONLY zone, but ALL\n\n if k.lower() == zone.lower() and hasDiffZone == True:\n # domain is cyclic depednet\n\n if timeOutWOBailick[zone].zoneHasAtLeastOneNSinBailiwick == False:\n\n if zone.lower() not in cyclicDependentZones['fullDep']:\n\n fp = cyclicDependentZones['fullDep']\n fp[zone] = singleZone\n cyclicDependentZones['fullDep'] = fp\n\n else:\n print('it shoudl not get here I guess')\n\n else:\n\n if zone.lower() not in cyclicDependentZones['fullDepWithInzone']:\n fp = cyclicDependentZones['fullDepWithInzone']\n fp[zone] = singleZone\n cyclicDependentZones['fullDepWithInzone'] = fp\n\n\n elif hasDiffZone == False:\n # print(zone.lower() + ' is not cyclic dependent')\n domainsThatFailedBUtNotCyclicDependency.append(zone.lower())\n # hasDiffZone=True\n else:\n # print('waht now')\n pass\n else:\n clearedZonesForMultipleZones.append(zone.lower())\n\n\n elif len(localDepZones) > 1:\n for k in localDepZones:\n\n # the error is here: must be not ONLY zone, but ALL\n\n if k.lower() == zone.lower() and hasDiffZone == True:\n # domain is cyclic depednet\n if zone.lower() not in cyclicDependentZones['partialDep']:\n print(\"object has no resolvable, damn it \")\n pd = cyclicDependentZones['partialDep']\n pd[zone.lower()] = singleZone\n cyclicDependentZones['partialDep'] = pd\n\n\n else:\n 
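# defensive branch: the zone was already recorded as partially dependent\n                                    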
print('it should not get here I guess')\n\n                            elif hasDiffZone == False:\n                                # print(zone.lower() + ' is not cyclic dependent')\n                                domainsThatFailedBUtNotCyclicDependency.append(zone.lower())\n                                # hasDiffZone=True\n                            else:\n                                # print('what now')\n                                pass\n                        else:\n                            clearedZonesForMultipleZones.append(zone.lower())\n                else:\n                    print('zone has been eval')\n                    pass\n    return cyclicDependentZones\n\n\ndef classZones(domains):\n    ret = dict()\n    ret['FullyCyclic'] = []\n    ret['CyclicBUtResolvableGivenInZone'] = []\n    for k, v in domains.items():\n\n        parentsK = ''\n        try:\n            parentsK = getNS2(k)\n        except Exception as e:\n            print(\"failed to get NS from zone \" + str(k) + \" in classZones\")\n            print(e)\n            print(type(e))\n\n        if parentsK != '':\n            tempL = ret['FullyCyclic']\n            tempDict = dict()\n            tempDict[k] = v\n            tempL.append(tempDict)\n            ret['FullyCyclic'] = tempL\n        else:\n            tempL = ret['CyclicBUtResolvableGivenInZone']\n            tempDict = dict()\n            tempDict[k] = v\n            tempL.append(tempDict)\n            ret['CyclicBUtResolvableGivenInZone'] = tempL\n\n    return ret\n\n\ndef find_cycles(timeout_file=None, output_file=None):\n    print(\"Step 1: read timed out zones\")\n    with open(timeout_file) as f:\n        bugged = json.load(f)\n\n    # create Authority objects from timed out zones\n    print(\"Step 2: create Authority objects\")\n    timeOutZones = makeAuth(bugged)\n\n    print(\"Step 3: get only zones without in-bailiwick/in-zone authoritative servers\")\n    zonesWoBailiwick = getZonesWithoutInBailiwickServer(timeOutZones)\n\n    print(\"Step 4: sort which ones are cyclic\")\n    cyclic = sortDepsNew(zonesWoBailiwick)\n\n    # print it\n\n    # print(\"step 6: classifying domains as resolvable and not resolvable\")\n    # classified=classZones(cyclic)\n\n    print(\"Step 7: writing down results\")\n    if len(cyclic) > 0:\n        with open(output_file, 'w') as fp:\n            json.dump(cyclic, fp)\n    else:\n        logging.warning('No cyclic dependent NS records found. Stopping here')\n        sys.exit(output_file + \": no cyclic dependent NS records found. 
Stopping here \")\n\n\nif __name__ == '__main__':\n # Setup logging if called from command line\n logging.basicConfig(filename='cycle-finder.log',\n level=logging.INFO, format=\"%(asctime)s cycle_finder: %(levelname)s %(message)s\")\n # Read the command line arguments\n argparser = argparse.ArgumentParser(\n description=\"Verifies timed out NS, either parent or child, and checks the ones with cyclic dependency\")\n argparser.add_argument('timeout_file', type=str, help=\"File with the timeout output from CyclicDetector.py\")\n argparser.add_argument('cycle_output', type=str, help=\"File to save the cycles detected\")\n args = argparser.parse_args()\n\n find_cycles(timeout_file=args.timeout_file, output_file=args.cycle_output)\n","sub_path":"findCyclicDep.py","file_name":"findCyclicDep.py","file_ext":"py","file_size_in_byte":30915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"341651949","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom utils.alphabet import *\n\nclass TabulaRecta(object):\n def __init__(self, msg_alphabet=None, key_alphabet=None):\n if not msg_alphabet:\n msg_alphabet = Alphabet()\n if not key_alphabet:\n key_alphabet = msg_alphabet\n self.msg_alphabet = msg_alphabet\n self.key_alphabet = key_alphabet\n row_keys = tuple(k for k in key_alphabet.elements)\n self.keyed_table = dict((row_keys[i], msg_alphabet << i) for i in range(len(key_alphabet.elements)))\n self.index_table = tuple(msg_alphabet << i for i in range(len(key_alphabet)))\n\n def key_row(self, k):\n \"\"\" Return a full (possibly-wrapped) alphabet from a given row, denoted by initial char `key_char` \"\"\"\n return self.keyed_table.get(k, None)\n\n # def idx_row(self, k):\n # idx = self.key_alphabet.find(k)\n # if idx != -1:\n # return self.index_table[idx]\n # return None\n\n def intersect(self, msg_char, key_char):\n \"\"\" Locate character at intersection of characters `a` and `b` \"\"\"\n r = self.key_row(key_char)\n if r is not None:\n idx = self.msg_alphabet.find(msg_char)\n if idx != -1:\n return r.element(idx)\n return None\n \n def locate(self, msg_char, key_char):\n \"\"\" Locate character at intersection of character `a` with row occupant character `k` \"\"\"\n \"\"\" Order here *is* important, but has nothing to do with rows vs. columns \"\"\"\n \"\"\" If character `a` not found, return None \"\"\"\n r = self.key_row(key_char)\n if r is not None:\n idx = r.find(msg_char)\n if idx != -1:\n return self.msg_alphabet.element(idx)\n return None\n\n #def p(self, delimiter=' '):\n # rows = []\n # l = len(self.table[0].elements)\n # rows.append(delimiter * 4 + delimiter.join(self.table[0].elements))\n # rows.append(delimiter * 2 + '+' + '-' * (l * 2))\n # rows.extend(row.elements[0] + ' |' + delimiter + delimiter.join(row.elements) for row in self.table)\n # return '\\n'.join(rows)\n","sub_path":"hj/utils/tabula_recta.py","file_name":"tabula_recta.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"76782127","text":"# -*- coding: utf-8 -*-\n'''test for util.py'''\nimport unittest\nimport util\n# from .. 
import util\n# from util import get_cv\n\nclass Test(unittest.TestCase):\n    rafsis = (\n        (\"a'u\", \"V'V\"),\n        ('bajyjvi', 'CVCYCCV'),\n        ('brode', 'CCVCV'),\n        ('brife', 'CCVCV'),\n        (\"noltruti'u\", \"CVCCCVCV'V\"),\n        ('inaja', 'VCVCV'),\n        ('selci', 'CVCCV'),\n        ('tcini', 'CCVCV'),\n        ('toi', 'CVV'),\n        ('uenai', 'VVCVV'),\n        (\"pe'o\", \"CV'V\")\n    )\n    def test_get_cv(self):\n        for rafsi, data_cv in self.rafsis:\n            result = util.get_cv(rafsi)\n            self.assertEqual(result, data_cv)\n            result = util.get_cv(rafsi, False)\n            self.assertEqual(result, data_cv.replace(\"'\", ''))\n        self.assertRaises(ValueError, util.get_cv, 'dfha')\n\n    def test_permissible_c(self):\n        for rafsi, data_cv in self.rafsis:\n            self.assertEqual(util.permissible_c('gf'), 0)\n            self.assertEqual(util.permissible_c('ts'), 2)\n            self.assertEqual(util.permissible_c('dl'), 1)\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"scripts/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"57905583","text":"\"\"\"Options for FX exporter.\"\"\"\nfrom __future__ import annotations\n\nimport dataclasses\nfrom typing import Callable, Dict\n\nimport torch\nfrom torch.onnx import _constants\nfrom torch.onnx._internal.fx import function_dispatcher\n\n\n@dataclasses.dataclass\nclass ExportOptions:\n    \"\"\"Options for FX-ONNX export.\n    Attributes:\n        opset_version: The export ONNX version.\n        use_binary_format: Whether to return ModelProto in binary format.\n        decomposition_table: The decomposition table for graph ops.\n            Default is for torch ops, including aten and prim.\n        op_level_debug: Whether to export the model with op level debug\n            information with onnxruntime evaluator. op_level_debug is not supported\n            when dynamic axes is on.\n        enable_dynamic_axes: Whether to export the model with dynamic axes. This would set\n            the shape of input and nodes all to dynamic by following symbolic fx graph.\n            op_level_debug is not supported when dynamic axes is on.\n    \"\"\"\n\n    opset_version: int = _constants.ONNX_DEFAULT_OPSET\n    use_binary_format: bool = True\n    op_level_debug: bool = False\n    # NOTE(titaiwang): What would be the best arg name for this?\n    enable_dynamic_axes: bool = True\n    decomposition_table: Dict[torch._ops.OpOverload, Callable] = dataclasses.field(\n        default_factory=lambda: function_dispatcher._ONNX_FRIENDLY_DECOMPOSITION_TABLE\n    )\n\n    def update(self, **kwargs):\n        for key, value in kwargs.items():\n            if hasattr(self, key):\n                if value is not None:\n                    setattr(self, key, value)\n            else:\n                raise KeyError(f\"ExportOptions has no attribute {key}\")\n\n        # NOTE(titaiwang): op_level_debug needs fixed shape to generate example inputs\n        # for torch ops and ONNX ops, but in dynamic export, we don't have this info, so\n        # op_level_debug would be forced to False if enable_dynamic_axes is True\n        # https://github.com/microsoft/onnx-script/issues/393\n        if self.enable_dynamic_axes and self.op_level_debug:\n            raise RuntimeError(\n                \"op_level_debug and enable_dynamic_axes are mutually exclusive. 
Please set only one of them to be True\"\n            )\n","sub_path":"torch/onnx/_internal/fx/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"547084047","text":"# coding: utf-8\n\n# A simple rectangle data class written by Tamamu.\n# 2016-12-04\n\n\nclass Rectangle:\n    def __init__(self, x, y, w, h):\n        self.x = x\n        self.y = y\n        self.w = w\n        self.h = h\n        self.x2 = x+w\n        self.y2 = y+h\n        self.half_w = int(w/2)\n        self.half_h = int(h/2)\n        self.quarter_w = int(w/4)\n        self.quarter_h = int(h/4)\n","sub_path":"rect.py","file_name":"rect.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"316171910","text":"# Statistics\n\n# Arithmetic mean: the sum of the N numbers divided by N\n# Median: the value in the middle when the N numbers are listed in increasing order\n# Mode: the value that appears most often among the N numbers\n# Range: the difference between the maximum and the minimum of the N numbers\n\n# N is odd\n\nimport sys\n\nN = int(sys.stdin.readline().rstrip())\nnums = []\nmode = dict()\nfor _ in range(N):\n    nums.append(int(sys.stdin.readline().rstrip()))\n    try: \n        mode[nums[-1]] += 1\n    except:\n        mode[nums[-1]] = 1\n\nmodes = []\n\navg = int(round(sum(nums) / N, 0))\nmed = sorted(nums)[N // 2]\nfor k, v in mode.items():\n    if v == max(mode.values()):\n        modes.append(k)\nran = max(nums) - min(nums)\nmod = modes[0]\nif len(modes) > 1:\n    mod = sorted(modes)[1]\n\nprint(avg)\nprint(med)\nprint(mod)\nprint(ran)\n","sub_path":"Python/BOJ/Level/11_Sort/04_2108.py","file_name":"04_2108.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"337743495","text":"import numpy as np\nimport sys\nimport dyna_gym.utils.distribution as distribution\nfrom random import randint\nfrom six import StringIO, b\nfrom gym import Env, spaces, utils\nfrom gym.envs.toy_text import discrete\n\nLEFT = 0\nDOWN = 1\nRIGHT = 2\nUP = 3\n\nMAPS = {\n    \"4x4\": [\n        \"SFFF\",\n        \"FHFH\",\n        \"FFFH\",\n        \"HFFG\"\n    ],\n    \"8x8\": [\n        \"SFFFFFFF\",\n        \"FFFFFFFF\",\n        \"FFFHFFFF\",\n        \"FFFFFHFF\",\n        \"FFFHFFFF\",\n        \"FHHFFFHF\",\n        \"FHFFHFHF\",\n        \"FFFHFFFG\"\n    ],\n}\n\nclass State:\n    \"\"\"\n    State class\n    \"\"\"\n    def __init__(self, index, time):\n        self.index = index\n        self.time = time\n\ndef random_map(map_size):\n    nR, nC = map_size\n    nH = int(0.2 * nR * nC) # Number of holes\n    m = []\n    for i in range(nR): # Generate ice floe\n        m.append(nC * [\"F\"])\n    m[0][0] = \"S\" # Generate start\n    m[-1][-1] = \"G\" # Generate goal\n    while nH > 0: # Generate holes\n        i, j = (randint(0, nR-1), randint(0, nC-1))\n        if m[i][j] == \"F\":\n            m[i][j] = \"H\"\n            nH -= 1\n    for i in range(nR): # Formatting\n        m[i] = \"\".join(m[i])\n    return m\n\ndef categorical_sample(prob_n, np_random):\n    \"\"\"\n    Sample from categorical distribution\n    Each row specifies class probabilities\n    \"\"\"\n    prob_n = np.asarray(prob_n)\n    csprob_n = np.cumsum(prob_n)\n    return (csprob_n > np_random.rand()).argmax()\n\nclass NSFrozenLakeV0(Env):\n    \"\"\"\n    Winter is here. 
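The setting is the classic FrozenLake task. 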
You and your friends were tossing around a frisbee at the park\n when you made a wild throw that left the frisbee out in the middle of the lake.\n The water is mostly frozen, but there are a few holes where the ice has melted.\n If you step into one of those holes, you'll fall into the freezing water.\n At this time, there's an international frisbee shortage, so it's absolutely imperative that\n you navigate across the lake and retrieve the disc.\n However, the ice is slippery, so you won't always move in the direction you intend.\n The surface is described using a grid like the following\n\n SFFF\n FHFH\n FFFH\n HFFG\n\n S : starting point, safe\n F : frozen surface, safe\n H : hole, fall to your doom\n G : goal, where the frisbee is located\n\n The episode ends when you reach the goal or fall in a hole.\n You receive a reward of 1 if you reach the goal, and zero otherwise.\n\n Non-Stationarity: when the transition function is stochastic , i.e. slippery ice,\n the probability of the resulting states from any action evolves randomly through\n time. The resulting transition function is L_p-Lipschitz.\n \"\"\"\n\n metadata = {'render.modes': ['human', 'ansi']}\n\n def __init__(self, desc=None, map_name=\"4x4\", map_size=(5,5), is_slippery=True):\n if desc is None and map_name is None:\n raise ValueError('Must provide either desc or map_name')\n elif desc is None:\n if map_name is \"random\":\n desc = random_map(map_size)\n else:\n desc = MAPS[map_name]\n self.desc = desc = np.asarray(desc, dtype='c')\n self.nrow, self.ncol = nrow, ncol = desc.shape\n\n self.nS = nrow * ncol # n states\n self.nA = 4 # n actions\n self.nT = 21 # n timesteps\n self.action_space = spaces.Discrete(self.nA)\n self.is_slippery = is_slippery\n self.tau = 1 # timestep duration\n self.L_p = 1.0\n self.L_r = 0.0\n #self.T = self.generate_transition_matrix()\n isd = np.array(self.desc == b'S').astype('float64').ravel() # Initial state distribution\n self.isd = isd / isd.sum()\n #self._seed()\n self.np_random = np.random.RandomState()\n self.reset()\n\n def _seed(self, seed=None):\n self.np_random, seed = utils.seeding.np_random(seed)\n return [seed]\n\n def reset(self):\n \"\"\"\n Reset the environment.\n IMPORTANT: Does not create a new environment.\n \"\"\"\n self.state = State(categorical_sample(self.isd, self.np_random), 0) # (index, time)\n self.lastaction = None # for rendering\n self.T = self.generate_transition_matrix()\n return self.state\n\n def display(self):\n print('Displaying NSFrozenLakeEnv-v0')\n print('map :')\n print(self.desc)\n print('n states :', self.nS)\n print('n actions :', self.nA)\n print('timeout :', self.nT)\n\n def inc(self, row, col, a):\n \"\"\"\n Given a position (row, col) and an action a, return the resulting position (row, col).\n \"\"\"\n if a==0: # left\n col = max(col-1,0)\n elif a==1: # down\n row = min(row+1,self.nrow-1)\n elif a==2: # right\n col = min(col+1,self.ncol-1)\n elif a==3: # up\n row = max(row-1,0)\n return (row, col)\n\n def to_s(self, row, col):\n \"\"\"\n From the state's position (row, col), retrieve the state index.\n \"\"\"\n return row * self.ncol + col\n\n def to_m(self, s):\n \"\"\"\n From the state index, retrieve the state's position (row, col).\n \"\"\"\n row = int(s / self.ncol)\n col = s - row * self.ncol\n return row, col\n\n def distance(self, s1, s2):\n \"\"\"\n Return the Manhattan distance between the positions of states s1 and s2\n \"\"\"\n if (type(s1) == State) and (type(s2) == State):\n row1, col1 = self.to_m(s1.index)\n row2, col2 = 
self.to_m(s2.index)\n        else:\n            assert (type(s1) == int), 'Error: input state has wrong type: type={}'.format(type(s1))\n            assert (type(s2) == int), 'Error: input state has wrong type: type={}'.format(type(s2))\n            row1, col1 = self.to_m(s1)\n            row2, col2 = self.to_m(s2)\n        return abs(row1 - row2) + abs(col1 - col2)\n\n    def equality_operator(self, s1, s2):\n        \"\"\"\n        Return True if the input states have the same indexes.\n        \"\"\"\n        return (s1.index == s2.index)\n\n    def reachable_states(self, s, a):\n        if (type(s) == State):\n            row, col = self.to_m(s.index)\n        else:\n            assert (type(s) == int), 'Error: input state has wrong type: type={}'.format(type(s))\n            row, col = self.to_m(s)\n        rs = np.zeros(shape=self.nS, dtype=int)\n        if self.is_slippery:\n            #for b in [(a-1)%4, a, (a+1)%4]:# Put back for 3 reachable states\n            for b in range(4):\n                newrow, newcol = self.inc(row, col, b)\n                rs[self.to_s(newrow, newcol)] = 1\n        else:\n            newrow, newcol = self.inc(row, col, a)\n            rs[self.to_s(newrow, newcol)] = 1\n        return rs\n\n    def distances_matrix(self, states):\n        \"\"\"\n        Return the distance matrix D corresponding to the states of the input array.\n        D[i,j] = distance(si, sj)\n        \"\"\"\n        n = len(states)\n        D = np.zeros(shape=(n, n))\n        for i in range(n):\n            for j in range(i+1, n):\n                D[i,j] = self.distance(states[i], states[j])\n                D[j,i] = self.distance(states[i], states[j])\n        return D\n\n    def generate_transition_matrix(self):\n        T = np.zeros(shape=(self.nS, self.nA, self.nT, self.nS), dtype=float)\n        for s in range(self.nS):\n            for a in range(self.nA):\n                # Generate distribution for t=0\n                rs = self.reachable_states(s, a)\n                nrs = np.sum(rs)\n                w = distribution.random_tabular(size=nrs)\n                wcopy = list(w.copy())\n                T[s,a,0,:] = np.asarray([0 if x == 0 else wcopy.pop() for x in rs], dtype=float)\n                row, col = self.to_m(s)\n                row_p, col_p = self.inc(row, col, a)\n                s_p = self.to_s(row_p, col_p)\n                T[s,a,0,s_p] += 1.0 # Increase weight on normally reached state\n                T[s,a,0,:] /= sum(T[s,a,0,:])\n                states = []\n                for k in range(len(rs)):\n                    if rs[k] == 1:\n                        states.append(State(k,0))\n                D = self.distances_matrix(states)\n                # Build subsequent distributions so that the Lipschitz continuity constraint is respected\n                for t in range(1, self.nT):\n                    w = distribution.random_constrained(w, D, self.L_p * self.tau)\n                    wcopy = list(w.copy())\n                    T[s,a,t,:] = np.asarray([0 if x == 0 else wcopy.pop() for x in rs], dtype=float)\n        return T\n\n    def transition_probability_distribution(self, s, t, a):\n        assert s.index < self.nS, 'Error: index bigger than nS: s.index={} nS={}'.format(s.index, self.nS)\n        assert t < self.nT, 'Error: time bigger than nT: t={} nT={}'.format(t, self.nT)\n        assert a < self.nA, 'Error: action bigger than nA: a={} nA={}'.format(a, self.nA)\n        return self.T[s.index, a, t]\n\n    def transition_probability(self, s_p, s, t, a):\n        assert s_p.index < self.nS, 'Error: position bigger than nS: s_p.index={} nS={}'.format(s_p.index, self.nS)\n        assert s.index < self.nS, 'Error: position bigger than nS: s.index={} nS={}'.format(s.index, self.nS)\n        assert t < self.nT, 'Error: time bigger than nT: t={} nT={}'.format(t, self.nT)\n        assert a < self.nA, 'Error: action bigger than nA: a={} nA={}'.format(a, self.nA)\n        return self.T[s.index, a, t, s_p.index]\n\n    def get_time(self):\n        return self.state.time\n\n    def dynamic_reachable_states(self, s, a):\n        \"\"\"\n        Return a numpy array of the reachable states.\n        Dynamic means that time increment is performed.\n        \"\"\"\n        rs = self.reachable_states(s, a)\n        srs = []\n        for i in range(len(rs)):\n            if rs[i] == 1:\n                srs.append(State(i, s.time + self.tau))\n        assert (len(srs) == 
sum(rs))\n return np.array(srs)\n\n def static_reachable_states(self, s, a):\n \"\"\"\n Return a numpy array of the reachable states.\n Static means that no time increment is performed.\n \"\"\"\n rs = self.reachable_states(s, a)\n srs = []\n for i in range(len(rs)):\n if rs[i] == 1:\n srs.append(State(i, s.time))\n assert (len(srs) == sum(rs))\n return np.array(srs)\n\n def transition(self, s, a, is_model_dynamic=True):\n \"\"\"\n Transition operator, return the resulting state, reward and a boolean indicating\n whether the termination criterion is reached or not.\n The boolean is_model_dynamic indicates whether the temporal transition is applied\n to the state vector or not.\n \"\"\"\n d = self.transition_probability_distribution(s, s.time, a)\n p_p = categorical_sample(d, self.np_random)\n if is_model_dynamic:\n s_p = State(p_p, s.time + self.tau)\n else:\n s_p = State(p_p, s.time)\n r = self.instant_reward(s, s.time, a, s_p)\n done = self.is_terminal(s_p)\n return s_p, r, done\n\n def instant_reward(self, s, t, a, s_p):\n \"\"\"\n Return the instant reward for transition s, t, a, s_p\n \"\"\"\n newrow, newcol = self.to_m(s_p.index)\n newletter = self.desc[newrow, newcol]\n if newletter == b'G':\n return +1.0\n elif newletter == b'H':\n return -1.0\n else:\n return 0.0\n\n def expected_reward(self, s, t, a):\n \"\"\"\n Return the expected reward function at s, t, a\n \"\"\"\n R = 0.0\n d = self.transition_probability_distribution(s, t, a)\n for i in range(len(d)):\n s_p = State(i, s.time + self.tau)\n r_i = self.instant_reward(s, t, a, s_p)\n R += r_i * d[i]\n return R\n\n def is_terminal(self, s):\n \"\"\"\n Return True if the input state is terminal.\n \"\"\"\n row, col = self.to_m(s.index)\n letter = self.desc[row, col]\n done = bytes(letter) in b'GH'\n if s.time + self.tau >= self.nT: # Timeout\n done = True\n return done\n\n def step(self, a):\n s, r, done = self.transition(self.state, a, True)\n self.state = s\n self.lastaction = a\n return (s, r, done, {})\n\n def render(self, mode='human', close=False):\n if close:\n return\n outfile = StringIO() if mode == 'ansi' else sys.stdout\n\n row, col = self.state.index // self.ncol, self.state.index % self.ncol\n desc = self.desc.tolist()\n desc = [[c.decode('utf-8') for c in line] for line in desc]\n desc[row][col] = utils.colorize(desc[row][col], \"red\", highlight=True)\n if self.lastaction is not None:\n outfile.write(\" ({})\\n\".format([\"Left\",\"Down\",\"Right\",\"Up\"][self.lastaction]))\n else:\n outfile.write(\"\\n\")\n outfile.write(\"\\n\".join(''.join(line) for line in desc)+\"\\n\")\n\n if mode != 'human':\n return outfile\n","sub_path":"dyna_gym/envs/nsfrozenlake_v0.py","file_name":"nsfrozenlake_v0.py","file_ext":"py","file_size_in_byte":12907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"29154150","text":"\"\"\"\n元素模块,实现Element类,提供查找下级元素的方法和对元素进行操作的方法。\n\"\"\"\nimport json\nimport time\nimport win32api, win32con, win32clipboard\nfrom win32com.client import Dispatch\n\nfrom xlat import KLog\n\n\nclass Element(object):\n \"\"\"\n 元素类,代表一个元素,并提供查找下级元素的方法和对元素进行操作的方法。\n \n 查找元素:\n \n find_element\n find_element_by_xxx\n ...\n 对元素进行操作:\n \n click --点击\n set --输入字符\n ...\n \"\"\"\n\n def __init__(self, parent, value):\n self.parent = parent\n self.value = value\n self.autoit = Dispatch(\"AutoItX3.Control\")\n # self.conn = self.parent.get_conn()\n self.interval = 0.2 # 操作间隔,click、set等操作之后等待这个间隔之后才继续操作,防止操作过快引起不稳定。单位秒\n\n def __repr__(self):\n return 
'<{0.__module__}.{0.__name__}>'.format(type(self))\n\n def text(self):\n \"\"\"返回元素的文本\"\"\"\n if \"Text\" in self.value:\n return self.value[\"Text\"]\n return \"\"\n\n def click(self, ox=0, oy=0):\n \"\"\"点击元素\n \n 参数:\n ox -- x偏移,向右为正,向左为负\n oy -- y偏移,向下为正,向上为负\n \"\"\"\n point = self.value[\"center\"]\n x, y = point.split(\",\")\n x = int(x) + ox\n y = int(y) + oy\n KLog.debug(\"点击:x:\" + str(x) + \" y:\" + str(y))\n \n \n self.autoit.MouseMove(x, y)\n time.sleep(self.interval)\n self.autoit.MouseClick(\"left\", x, y)\n # self.autoit.MouseClick(\"left\",x,y,1,0) #最后一个参数表示鼠标移动速度,1-100,0表示立即,默认10\n\n # 方法2,win32api\n # win32api.SetCursorPos([x,y]) #为鼠标焦点设定一个位置\n # win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0,0,0) \n # win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0,0,0)\n\n time.sleep(self.interval)\n\n def right_click(self, ox=0, oy=0):\n \"\"\"右击元素\n\n 参数:\n ox -- x偏移,向右为正,向左为负\n oy -- y偏移,向下为正,向上为负\n \"\"\"\n point = self.value[\"center\"]\n x, y = point.split(\",\")\n x = int(x) + ox\n y = int(y) + oy\n KLog.debug(\"右击:x:\" + str(x) + \" y:\" + str(y))\n self.autoit.MouseMove(x, y)\n time.sleep(self.interval)\n self.autoit.MouseClick(\"right\", x, y)\n time.sleep(self.interval)\n\n def doubleclick(self):\n \"\"\"双击元素\"\"\"\n point = self.value[\"center\"]\n KLog.debug(point)\n x, y = point.split(\",\")\n self.autoit.MouseMove(x, y)\n time.sleep(self.interval)\n self.autoit.MouseClick(\"left\", x, y, 2)\n time.sleep(self.interval)\n\n def tripleclick(self):\n \"\"\"三击元素\"\"\"\n point = self.value[\"center\"]\n KLog.debug(point)\n x, y = point.split(\",\")\n self.autoit.MouseMove(x, y)\n time.sleep(self.interval)\n self.autoit.MouseClick(\"left\", x, y, 3)\n time.sleep(self.interval)\n\n def hover(self):\n \"\"\"hover元素\"\"\"\n point = self.value[\"center\"]\n KLog.debug(point)\n x, y = point.split(\",\")\n self.autoit.MouseMove(x, y)\n time.sleep(self.interval)\n\n def set(self, text):\n \"\"\"设置文本,用的是模拟输入的方式。优先用该方法,不行再用其他两种\"\"\"\n # self.doubleclick()\n self.tripleclick()\n self.autoit.Send(text)\n time.sleep(self.interval)\n\n def settext(self, text):\n \"\"\"另一种设置文本的方法,直接设置元素的值\"\"\"\n cmd = json.dumps({\"cmd\": \"set_text\", 'text': text, \"index\": self.value[\"index\"]}, ensure_ascii=False) + \"\\n\"\n KLog.debug(cmd.strip(\"\\n\"))\n self.get_conn().send(cmd.encode())\n recv_data = self.get_conn().recv(2048).decode()\n KLog.debug(recv_data.strip(\"\\n\"))\n h = json.loads(recv_data)\n time.sleep(self.interval)\n if h[\"success\"] is True:\n KLog.info(\"设置文本成功\")\n return True\n else:\n KLog.info(\"设置��本失败,未找到可以设置文本的元素\")\n return False\n\n def paste(self, text):\n \"\"\"第三种设置文本的方法,用复制粘贴的方法\"\"\"\n self.click()\n # text = text.encode(\"GB2312\")\n # print(text)\n win32clipboard.OpenClipboard()\n win32clipboard.EmptyClipboard()\n win32clipboard.SetClipboardData(win32con.CF_UNICODETEXT, text)\n # print(win32clipboard.GetClipboardData())\n win32clipboard.CloseClipboard()\n self.autoit.Send(\"^v\")\n time.sleep(self.interval)\n\n def clear(self):\n \"\"\"清空文本\"\"\"\n point = self.value[\"center\"]\n KLog.debug(point)\n x, y = point.split(\",\")\n self.autoit.MouseClick(\"left\", x, y, 3)\n time.sleep(self.interval)\n self.autoit.Send(\"{BS}\")\n time.sleep(self.interval)\n \n def check(self,ck=True):\n \"\"\"勾选复选框\n \n 参数:\n ck -- 勾选或者取消勾选,True是勾选,False是取消勾选。默认是True\n \"\"\"\n #实现原理是找到该元素下面的对号图标(tickimage),如果能找到则表明已经勾选,没找到就是没勾选(因为不可见)\n e = self.find_element_by_id(\"boximage\")\n if e is None:\n raise Exception(\"不是复选框,不能check!\")\n if ck == True:\n if 
e.find_element_by_id(\"tickimage\") is None:\n e.click()\n else:\n if e.find_element_by_id(\"tickimage\"):\n e.click()\n time.sleep(self.interval)\n \n def is_checked(self):\n \"\"\"检查复选框是否勾选状态\n\n \"\"\"\n #实现原理是找到该元素下面的对号图标(tickimage),如果能找到则表明已经勾选,没找到就是没勾选(因为不可见)\n e = self.find_element_by_id(\"boximage\")\n if e is None:\n raise Exception(\"不是复选框,不能检查check状态!\")\n if e.find_element_by_id(\"tickimage\") is None:\n return False\n else:\n return True\n time.sleep(self.interval)\n \n def menu_check(self,ck=True):\n \"\"\"勾选菜单\n \n 参数:\n ck -- 勾选或者取消勾选,True是勾选,False是取消勾选。默认是True\n \"\"\"\n #实现原理是找到菜单旁边的对号图标(tickimage),如果能找到则表明已经勾选,没找到就是没勾选(因为不可见)\n if ck == True:\n e = self.find_brother(id=\"icon\")\n if e is None or e.value[\"ResId\"] == \"\":\n self.click()\n else:\n e = self.find_brother(id=\"icon\")\n if e and e.value[\"ResId\"] != \"\":\n e.click()\n time.sleep(self.interval)\n \n def menu_is_disable(self):\n \"\"\"菜单是否被禁用\n\n \"\"\"\n if \"TextColorResID\" in self.value and self.value[\"TextColorResID\"]==\"menu.color.disable\" :\n return True\n else:\n return False\n\n def drag(self, ox, oy):\n \"\"\"点击元素\n \n 参数:\n oy -- 拖放位置的x偏移,向右为正,向左为负\n oy -- 拖放位置的y偏移,向下为正,向上为负\n 比如,要往下拖动10像素,那就是drag(0,10)\n \"\"\"\n point = self.value[\"center\"]\n x, y = point.split(\",\")\n KLog.debug(\"拖动到:x:\" + str(int(x) + ox) + \" y:\" + str(int(y) + oy))\n self.autoit.MouseClickDrag(\"left\", x, y, int(x) + ox, int(y) + oy)\n time.sleep(self.interval)\n\n def refresh(self):\n \"\"\"更新元素的属性\n \"\"\"\n cmd = json.dumps({\"cmd\": \"refresh\", \"index\": self.value[\"index\"]},\n ensure_ascii=False) + \"\\n\"\n KLog.debug(cmd.strip(\"\\n\"))\n self.get_conn().send(cmd.encode())\n recv_data = self.get_conn().recv(2048).decode()\n KLog.debug(recv_data.strip(\"\\n\"))\n h = json.loads(recv_data)\n if h[\"success\"] is True:\n self.value = h[\"value\"]\n KLog.info(\"更新元素属性成功\")\n return self\n else:\n KLog.info(\"更新元素属性失败\")\n return None\n\n def get_conn(self):\n return self.parent.get_conn()\n \n def find_parent(self):\n \"\"\"查找该元素的父元素\"\"\"\n cmd = json.dumps({\"cmd\": \"find_parent\", \"index\": self.value[\"index\"]},\n ensure_ascii=False) + \"\\n\"\n KLog.debug(cmd.strip(\"\\n\"))\n self.get_conn().send(cmd.encode())\n recv_data = self.get_conn().recv(2048).decode()\n KLog.debug(recv_data.strip(\"\\n\"))\n h = json.loads(recv_data)\n if h[\"success\"] is True:\n KLog.info(\"找到父元素\")\n return Element(self, h[\"value\"])\n else:\n KLog.info(\"找不到父元素\")\n return None\n\n \n def find_brother(self,**kwargs):\n \"\"\"查找该元素符合条件的兄弟元素。默认元素需可见,如不需要则加上visible=False\n \n 参数:\n \n kwargs --关键字参数,要匹配的元素属性值。key不区分大小写。如id=\"aaa\"\n 返回值:\n \n 找到的元素Element类,只返回找到的第一个。找不到返回None\n 例子:\n \n find_element(ID=\"aaa\",Text=\"bbb\") \n 查找id是aaa且文本是bbb的元素\n \"\"\"\n # 把key转成小写\n param = {k.lower(): v for k, v in kwargs.items()}\n # 如果没有指定visible,则默认设为True\n if \"visible\" not in param:\n param[\"visible\"] = True\n cmd = json.dumps({\"cmd\": \"find_brother\", 'param': param, \"index\": self.value[\"index\"]},\n ensure_ascii=False) + \"\\n\"\n KLog.debug(cmd.strip(\"\\n\"))\n self.get_conn().send(cmd.encode())\n recv_data = self.get_conn().recv(2048).decode()\n KLog.debug(recv_data.strip(\"\\n\"))\n h = json.loads(recv_data)\n if h[\"success\"] is True:\n KLog.info(\"找到元素:\" + str(param))\n return Element(self, h[\"value\"])\n else:\n KLog.info(\"找不到元素:\" + str(param))\n return None\n\n def find_element(self, **kwargs):\n \"\"\"在该元素内查找符合条件的元素。默认元素需可见,如不需要则加上visible=False\n \n 参数:\n \n kwargs 
--关键字参数,要匹配的元素属性值。key不区分大小写。如id=\"aaa\"\n 返回值:\n \n 找到的元素Element类,只返回找到的第一个。找不到返回None\n 例子:\n \n find_element(ID=\"aaa\",Text=\"bbb\") \n 查找id是aaa且文本是bbb的元素\n \"\"\"\n # 把key转成小写\n param = {k.lower(): v for k, v in kwargs.items()}\n # 如果没有指定visible,则默认设为True\n if \"visible\" not in param:\n param[\"visible\"] = True\n cmd = json.dumps({\"cmd\": \"find_element\", 'param': param, \"index\": self.value[\"index\"]},\n ensure_ascii=False) + \"\\n\"\n KLog.debug(cmd.strip(\"\\n\"))\n self.get_conn().send(cmd.encode())\n recv_data = self.get_conn().recv(2048).decode()\n KLog.debug(recv_data.strip(\"\\n\"))\n h = json.loads(recv_data)\n if h[\"success\"] is True:\n KLog.info(\"找到元素:\" + str(param))\n return Element(self, h[\"value\"])\n else:\n KLog.info(\"找不到元素:\" + str(param))\n return None\n\n def find_elements(self, **kwargs):\n \"\"\"在该元素查找符合条件的所有元素\n \n 参数:\n \n kwargs --关键字参数,要匹配的元素属性值。key不区分大小写。如id=\"aaa\"\n 返回值:\n \n 返回一个数组,包含所有找到的元素Element类。找不到返回空数组[]\n 例子:\n \n find_elements(ID=\"aaa\",Text=\"bbb\") \n 查找id是aaa且文本是bbb的所有元素\n \"\"\"\n param = kwargs\n cmd = json.dumps({\"cmd\": \"find_elements\", 'param': param, \"index\": self.value[\"index\"]},\n ensure_ascii=False) + \"\\n\"\n KLog.debug(cmd.strip(\"\\n\"))\n self.get_conn().send(cmd.encode())\n recv_data = self.get_conn().recv(10240).decode()\n KLog.debug(recv_data.strip(\"\\n\"))\n h = json.loads(recv_data)\n res = []\n for i in h[\"value\"]:\n res.append(Element(self, i))\n KLog.debug(res)\n return res\n\n def find_menu_element(self, param):\n \"\"\"在该元素内查找符合条件的菜单,这个方法应该用不到\n \"\"\"\n # if type(param) == str:\n param = {\"text\": param, \"precise\": False}\n cmd = json.dumps({\"cmd\": \"find_menu_element\", 'param': param}, ensure_ascii=False) + \"\\n\"\n KLog.debug(cmd.strip(\"\\n\"))\n self.get_conn().send(cmd.encode())\n recv_data = self.get_conn().recv(2048).decode()\n KLog.debug(recv_data.strip(\"\\n\"))\n h = json.loads(recv_data)\n if h[\"success\"] is True:\n KLog.info(\"找到菜单项:\" + str(param))\n return Element(self, h[\"value\"])\n else:\n KLog.info(\"找不到菜单项:\" + str(param))\n return None\n\n def wait(self, **kwargs):\n \"\"\"等待直到符合条件的元素出现\n \n 参数:\n \n kwargs --关键字参数,要匹配的元素属性值。默认超时是3s,���以用timeout=10修改\n 返回值:\n \n 找到的元素Element类,只返回找到的第一个。找不到返回None\n 例子:\n \n wait(ID=\"aaa\",Text=\"bbb\",timeout=1) \n 等待id是aaa且文本是bbb的元素出现,1s超时\n \"\"\"\n if \"timeout\" in kwargs:\n timeout = kwargs.pop(\"timeout\")\n else:\n timeout = 3\n start = time.time()\n while time.time() <= start + timeout:\n e = self.find_element(**kwargs)\n if e is not None:\n return e\n else:\n # KLog.debug(\"not found...\")\n time.sleep(1)\n # 最后再检查一次\n e = self.find_element(**kwargs)\n if e is not None:\n return e\n else:\n KLog.info(str(timeout) + \"秒超时,找不到元素:\" + str(kwargs))\n return None\n\n def wait_not(self, **kwargs):\n \"\"\"等待直到符合条件的元素消失\n \n 参数:\n \n kwargs --关键字参数,要匹配的元素属性值。默认超时是3s,可以用timeout=10修改\n 返回值:\n \n 元素消失返回True,没消失返回False\n 例子:\n \n wait_not(ID=\"aaa\",Text=\"bbb\",timeout=1) \n 等待id是aaa且文本是bbb的元素消失,1s超时\n \"\"\"\n if \"timeout\" in kwargs:\n timeout = kwargs.pop(\"timeout\")\n else:\n timeout = 3\n start = time.time()\n while time.time() <= start + timeout:\n e = self.find_element(**kwargs)\n if e is None:\n return True\n else:\n KLog.debug(\"still found...\")\n time.sleep(1)\n # 最后再检查一次\n e = self.find_element(**kwargs)\n if e is None:\n return True\n else:\n KLog.info(str(timeout) + \"秒超时,元素仍然存在:\" + str(kwargs))\n return False\n\n def find_element_by_id(self, id):\n \"\"\"在该元素内根据id查找元素\n \n 参数:\n \n id -- 元素id,字符串\n 返回值:\n \n 
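# --- Added example (not part of the dataset records above/below) ---
# Two self-contained sketches of patterns used by the Element class above.
# normalize_query mirrors the kwargs handling in find_element/find_brother
# (keys lowercased, 'visible' defaulting to True before the query is sent);
# wait_until mirrors the polling loop in wait/wait_not. Both names are
# illustrative only, not part of the module's API.
import time

def normalize_query(**kwargs):
    param = {k.lower(): v for k, v in kwargs.items()}
    param.setdefault("visible", True)  # same default the class applies
    return param

def wait_until(predicate, timeout=3.0, interval=1.0):
    deadline = time.time() + timeout
    while time.time() <= deadline:
        result = predicate()
        if result:
            return result
        time.sleep(interval)
    return predicate()  # one final check, as wait()/wait_not() do

assert normalize_query(ID="aaa", Text="bbb") == {
    "id": "aaa", "text": "bbb", "visible": True}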
找到的元素Element类,只返回找到的第一个。找不到返回None\n 例子:\n \n find_element_by_id(\"aaa\") -- 查找id是aaa的元素\n \"\"\"\n return self.find_element(ID=id)\n\n def find_elements_by_id(self, id):\n \"\"\"在该元素内根据id查找所有元素\n \n 参数:\n \n id -- 元素id,字符串\n 返回值:\n \n 找到的元素Element类数组,找不到返回空数组[]\n 例子:\n \n find_elements_by_id(\"aaa\") -- 查找所有id是aaa的元素\n \"\"\"\n return self.find_elements(ID=id)\n\n def find_element_by_text(self, text, precise=True):\n \"\"\"在该元素内根据文本查找元素,默认是精确查找\n \n 参数:\n \n text -- 元素文本,字符串\n precise -- True表示精确查找,False表示模糊查找。默认True\n 返回值:\n \n 找到的元素Element类,只返回找到的第一个。找不到返回None\n 例子:\n \n find_element_by_text(\"bbb\") -- 查找文本是bbb的元素\n \"\"\"\n return self.find_element(Text=text, precise=precise)\n\n def find_elements_by_text(self, text, precise=True):\n \"\"\"在该元素内根据文本查找所有元素,默认是精确查找\n \n 参数:\n \n text -- 元素文本,字符串\n precise -- True表示精确查找,False表示模糊查找。默认True\n 返回值:\n \n 找到的元素Element类数组,找不到返回空数组[]\n 例子:\n \n find_elements_by_text(\"bbb\") -- 查找文本是bbb的元素\n \"\"\"\n return self.find_elements(Text=text, precise=precise)\n\n def find_element_by_class(self, class_name):\n \"\"\"在该元素内根据class查找元素\n \n 参数:\n \n class_name -- 元素class,字符串\n 返回值:\n \n 找到的元素Element类,只返回找到的第一个。找不到返回None\n 例子:\n \n find_element_by_class(\"ccc\") -- 查找class是ccc的元素\n \"\"\"\n return self.find_element(Class=class_name)\n\n def find_elements_by_class(self, class_name):\n \"\"\"在该元素内根据class查找所有元素\n \n 参数:\n \n class_name -- 元素class,字符串\n 返回值:\n \n 找到的元素Element类数组,找不到返回空数组[]\n 例子:\n \n find_element_by_class(\"ccc\") -- 查找class是ccc的���有元素\n \"\"\"\n return self.find_elements(Class=class_name)\n\n def find_element_by_xpath(self, xpath):\n \"\"\"在该元素内根据xpath查找元素\n \n (未实现)\n \"\"\"\n pass\n\n def find_elements_by_xpath(self, xpath):\n \"\"\"在该元素内根据xpath查找所有元素\n \n (未实现)\n \"\"\"\n pass\n","sub_path":"xlat/element.py","file_name":"element.py","file_ext":"py","file_size_in_byte":19305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"142279255","text":"import torch\nfrom .model import create_model\nfrom .dataloaders import get_three_loaders, get_train_val_test_loaders\nfrom .predict import load_model_from_weights\nfrom .train import train\n\n\ndef train_classifier_on_directory(data_dir: str, val=True, test=True,\n data_in_train_dir=False, N_EPOCHS=5):\n \"\"\"\n Trains a classifier on a directory\n Args:\n data_dir: Directory of your data\n\n Returns: a model\n \"\"\"\n classes = []\n val_loader = None\n test_loader = None\n if not data_in_train_dir:\n (train_loader, val_loader, test_loader), classes = get_three_loaders(data_dir)\n else:\n if val and test:\n loaders, classes = get_train_val_test_loaders(data_dir)\n train_loader = loaders[0]\n val_loader = loaders[1]\n test_loader = loaders[2]\n elif val and not test:\n loaders, classes = get_train_val_test_loaders(data_dir)\n train_loader = loaders[0]\n val_loader = loaders[1]\n else:\n loaders, classes = get_train_val_test_loaders(data_dir)\n train_loader = loaders[0]\n test_loader = loaders[1]\n\n if len(classes) == 0:\n raise FileNotFoundError(\"There are no classes found in your directory, please give a valid directory\")\n classifier = create_model(len(classes), pretrained=True)\n train(N_EPOCHS, classifier, train_loader, val_loader=val_loader)\n return classifier\n","sub_path":"sclassifier/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"549032820","text":"from pysyncobj.dns_resolver import 
global_dns_resolver\nfrom pysyncobj.node import Node\n\n\nclass TCPNode(Node):\n \"\"\"\n A node intended for communication over TCP/IP. Its id is the network address (host:port).\n \"\"\"\n\n def __init__(self, address, **kwargs):\n \"\"\"\n Initialise the TCPNode\n\n :param address: network address of the node in the format 'host:port'\n :type address: str\n :param **kwargs: any further information that should be kept about this node\n \"\"\"\n\n super(TCPNode, self).__init__(address, **kwargs)\n self.address = address\n self.host, port = address.rsplit(':', 1)\n self.port = int(port)\n self.ip = global_dns_resolver().resolve(self.host)\n\n def __repr__(self):\n v = vars(self)\n filtered = ['_id', 'address', 'host', 'port', 'ip']\n formatted = ['{} = {}'.format(key, repr(v[key]))\n for key in v if key not in filtered]\n return '{}({}{})'.format(\n type(self).__name__, repr(self.id),\n (', ' + ', '.join(formatted)) if len(formatted) else '')\n","sub_path":"pysyncobj/tcp_node.py","file_name":"tcp_node.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"178925615","text":"#!/usr/bin/env python\n\n\"\"\"\nThis module is used for provide information from vasp relax.\n\"\"\"\n\nimport numpy as np\nfrom twinpy.plot.base import DEFAULT_COLORS, DEFAULT_MARKERS, line_chart\nfrom twinpy.structure.diff import get_structure_diff\n\n\nclass RelaxPlot():\n \"\"\"\n Relax transition plot class.\n \"\"\"\n\n def __init__(\n self,\n relax_data:dict,\n static_data:dict=None,\n start_step:int=1,\n ):\n \"\"\"\n Args:\n relax_data: Relax data.\n static_data: Static data.\n start_step: The step number of the first relax in this WorkChain.\n If you relax 20 steps in the privious RelaxWorkChain,\n for example, start_step becomes 21.\n \"\"\"\n self._relax_data = relax_data\n self._static_data = static_data\n self._start_step = start_step\n if self._static_data is None:\n self._exist_static = False\n else:\n self._exist_static = True\n self._vasp_final_steps = None\n self._set_vasp_final_steps()\n\n @property\n def relax_data(self):\n \"\"\"\n Relax data.\n \"\"\"\n return self._relax_data\n\n @property\n def static_data(self):\n \"\"\"\n Static data.\n \"\"\"\n return self._static_data\n\n @property\n def start_step(self):\n \"\"\"\n Start step.\n \"\"\"\n return self._start_step\n\n def _set_vasp_final_steps(self):\n \"\"\"\n Set vasp final steps.\n \"\"\"\n eg_cols = self._relax_data['step_energies_collection']\n vasp_final_steps = []\n count = self._start_step - 1\n for cols in eg_cols:\n count += len(cols['energy_extrapolated'])\n vasp_final_steps.append(count)\n self._vasp_final_steps = vasp_final_steps\n\n @property\n def vasp_final_steps(self):\n \"\"\"\n Final steps of each vasp calculation.\n \"\"\"\n return self._vasp_final_steps\n\n def plot_max_force(self,\n ax,\n decorate:bool=True):\n \"\"\"\n Plot max force.\n\n Args:\n ax: Matplotlib subplot.\n decorate (bool): If True, decorate figure.\n \"\"\"\n if decorate:\n xlabel = 'Relax Steps'\n ylabel = 'Max Force'\n else:\n xlabel = ylabel = None\n\n steps = self._vasp_final_steps\n max_forces = self._relax_data['max_force']\n line_chart(\n ax,\n steps,\n max_forces,\n xlabel=xlabel,\n ylabel=ylabel,\n c=DEFAULT_COLORS[0],\n marker=DEFAULT_MARKERS[0],\n facecolor='None')\n\n if self._exist_static:\n static_step = steps[-1] + 0.1\n static_max_force = self._static_data['max_force']\n ax.scatter(static_step, static_max_force,\n c=DEFAULT_COLORS[0], 
marker='*', s=150)\n\n def plot_energy(self,\n ax,\n decorate:bool=True):\n \"\"\"\n Plot energy.\n\n Args:\n ax: Matplotlib subplot.\n decorate (bool): If True, decorate figure.\n \"\"\"\n if decorate:\n xlabel = 'Relax Steps'\n ylabel = 'Energy [eV]'\n else:\n xlabel = ylabel = None\n\n steps = self._vasp_final_steps\n energies = self._relax_data['energy']\n eg_cols = self._relax_data['step_energies_collection']\n vasp_energies = []\n for cols in eg_cols:\n vasp_energies.extend(cols['energy_extrapolated'])\n vasp_steps = [ i+self._start_step for i in range(len(vasp_energies)) ]\n\n line_chart(\n ax,\n vasp_steps,\n vasp_energies,\n xlabel,\n ylabel,\n c=DEFAULT_COLORS[0],\n marker=DEFAULT_MARKERS[0],\n s=5,\n facecolor='None')\n ax.scatter(self._vasp_final_steps, energies, c=DEFAULT_COLORS[0],\n marker=DEFAULT_MARKERS[1], facecolor=DEFAULT_MARKERS[0])\n\n if self._exist_static:\n static_step = steps[-1] + 0.1\n static_energy = self._static_data['energy']\n ax.scatter(static_step, static_energy,\n edgecolor=DEFAULT_COLORS[0], marker='*', s=150,\n facecolor='None')\n\n def plot_stress(self,\n ax,\n decorate:bool=True):\n \"\"\"\n Plot stress.\n\n Args:\n ax: Matplotlib subplot.\n decorate (bool): If True, decorate figure.\n \"\"\"\n if decorate:\n xlabel = 'Relax Steps'\n ylabel = 'Stress'\n stress_labels = ['xx', 'yy', 'zz', 'yz', 'zx', 'xy']\n else:\n xlabel = ylabel = None\n stress_labels = [None] * 6\n\n steps = self._vasp_final_steps\n stresses = self._relax_data['stress']\n\n for i in range(6):\n line_chart(\n ax,\n steps,\n stresses[:,i],\n xlabel,\n ylabel,\n c=DEFAULT_COLORS[i],\n marker=DEFAULT_MARKERS[i],\n facecolor='None',\n label=stress_labels[i])\n\n if self._exist_static:\n static_step = steps[-1] + 0.1\n static_stress = self._static_data['stress']\n for i in range(6):\n ax.scatter(static_step, static_stress[i],\n c=DEFAULT_COLORS[i], marker='*', s=150)\n\n ax.legend(loc='upper left')\n\n def plot_abc(self,\n ax,\n decorate:bool=True):\n \"\"\"\n Plot abc.\n\n Args:\n ax: Matplotlib subplot.\n decorate (bool): If True, decorate figure.\n \"\"\"\n if decorate:\n xlabel = 'Relax Steps'\n ylabel = 'Length [angstrom]'\n abc_labels = ['a', 'b', 'c']\n else:\n xlabel = ylabel = None\n abc_labels = [None] * 3\n\n steps = self._vasp_final_steps\n abcs = self._relax_data['abc']\n\n for i in range(3):\n line_chart(\n ax,\n steps,\n abcs[:,i],\n xlabel=xlabel,\n ylabel=ylabel,\n c=DEFAULT_COLORS[i],\n marker=DEFAULT_MARKERS[i],\n facecolor='None',\n label=abc_labels[i])\n\n if self._exist_static:\n static_step = steps[-1] + 0.1\n static_abc = self._static_data['abc']\n for i in range(3):\n ax.scatter(static_step, static_abc[i],\n c=DEFAULT_COLORS[i], marker='*', s=150)\n\n ax.legend(loc='upper left')\n\n\ndef plot_atom_diff(ax,\n initial_cell:tuple,\n final_cell:tuple,\n decorate:bool=True,\n direction:str='x',\n shuffle:bool=True,\n label:str='default',\n **kwargs):\n \"\"\"\n Plot atom diff.\n\n Args:\n initial_cell (tuple): Initial cell.\n final_cell (tuple): Final cell.\n decorate (bool): If True, decorate figure.\n direction (str): Diff direction.\n shuffle (bool): If True, diffrence of scaled positions,\n which ignore lattice shear, are ploted.\n\n Notes:\n For input 'kwargs', see twinpy.plot.base.line_chart.\n \"\"\"\n diff = get_structure_diff(cells=[initial_cell, final_cell],\n include_base=False)\n if shuffle:\n scaled_posi_diffs = diff['scaled_posi_diffs'][0]\n cart_posi_diffs = np.dot(final_cell[0].T, scaled_posi_diffs.T).T\n else:\n cart_posi_diffs = 
diff['cart_posi_diffs'][0]\n\n z_coords = np.dot(initial_cell[0].T, initial_cell[1].T).T[:,2]\n\n if label == 'default':\n label = direction\n\n if decorate:\n xlabel = 'Distance [angstrom]'\n ylabel = 'Initial z coordinate'\n else:\n xlabel = None\n ylabel = None\n\n dic = {'x': 0, 'y': 1, 'z': 2}\n idx = dic[direction]\n\n line_chart(ax=ax,\n xdata=cart_posi_diffs[:,idx],\n ydata=z_coords,\n xlabel=xlabel,\n ylabel=ylabel,\n label=label,\n sort_by='y',\n **kwargs)\n\n ax.legend()\n","sub_path":"twinpy/plot/relax.py","file_name":"relax.py","file_ext":"py","file_size_in_byte":8560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"35819388","text":"class Pelmanism(object):\n\n def __init__(self, _get_input):\n self.player = {}\n self.cards = [input_line for input_line in _get_input]\n self.player_num = 1\n\n def set_player(self, num):\n if num not in self.player:\n self.player[num] = Player()\n return self.player[num]\n\n def turn_cards(self, a, b, A, B):\n player = self.set_player(self.player_num)\n card1 = self.cards[a][b]\n card2 = self.cards[A][B]\n if card1 is not None and card1 == card2:\n player.has_trumps += 2\n self.delete_cards(a, b, A, B)\n else:\n self.player_num += 1\n self.check_player_num()\n\n def delete_cards(self, a, b, A, B):\n self.cards[a][b] = None\n self.cards[A][B] = None\n\n def check_player_num(self):\n if self.player_num > N:\n self.player_num = 1\n\n def display(self):\n for player_num in sorted(self.player):\n self.player[player_num].display()\n\n\nclass Player(object):\n\n def __init__(self):\n self.has_trumps = 0\n\n def display(self):\n print(self.has_trumps)\n\n\ndef get_input(H):\n for h in range(H):\n input_line = map(int, input().split())\n yield list(input_line)\n\n\nH, W, N = map(int, input().split())\n\n_get_input = get_input(H)\npelmanism = Pelmanism(_get_input)\n\nfor n in range(N):\n player = pelmanism.set_player(n + 1)\n\nfor l in range(int(input())):\n a, b, A, B = map(int, input().split())\n pelmanism.turn_cards(a - 1, b - 1, A - 1, B - 1)\n\n\npelmanism.display()\n","sub_path":"takumiy/B/B027.py","file_name":"B027.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"101262039","text":"import sys\ndef readfunction(file):\n with open(file,'r') as f:\n content = [x.strip('\\n') for x in f.readlines()]\n return content\n\ndef find_anchor(input,target):\n for i, li in enumerate(input):\n if li==target:\n return input[i:]\n\ndef find_node_table(input):\n table={}\n for i, li in enumerate(input):\n if len(li)>4 and li[0:4]=='$var':\n symbol, signal = li.split(' ')[3:5]\n table[symbol] = signal\n elif li=='$enddefinitions $end':return table\n return table\n\ndef resolve_node_table(input1,input2):\n table={}\n #filter key differences(typical length mismatch)\n diff_keys=set(input1.keys())-set(input2.keys())\n #filter signal differences(signal naming mismatch)\n diff_signals=set(input1.values())-set(input2.values())\n\n if( diff_keys or diff_signals ):return table\n for i in input1:\n if input1[i]!=input2[i]:\n for j in input2:\n if input1[i]==input2[j]:\n table[j]=i\n else:\n table[i]=i\n return table\n\ndef compose_time_list_ans(input):\n from collections import defaultdict\n table=defaultdict(dict)\n temp='$dumpvars'\n for i in input:\n i.replace(' ','')\n if (i[0]=='$') or (i[0]=='#'):\n temp=i\n else:\n table[temp][ i[-1] ] = i[0:-1]\n return table;\n\n\ndef compose_time_list(input,intable):\n from 
collections import defaultdict\n table=defaultdict(dict)\n temp='$dumpvars'\n for i in input:\n i.replace(' ','')\n if (i[0]=='$') or (i[0]=='#'):\n temp=i\n else:\n key = intable[ i[-1] ]\n table[temp][key] = i[0:-1]\n return table;\n\ndef compare_list(input_a,input_s,):\n diff = set(input_a.keys()) - set(input_s.keys())\n if( diff ):\n print('Timing Mismatch')\n return False\n for k,v in input_a.items():\n diff = set(v.keys()) - set(input_s[k].keys())\n if( diff ):\n print('Signal mismatch at '+k)\n return False\n va = input_s[k]\n for p,q in v.items():\n if(va[p]!=q):\n print('value mismatch at'+k)\n return False\n return True\n \nimport pickle \ndef compress(dir, odir):\n answer_list=readfunction(dir)\n a_list=find_anchor(answer_list,'$dumpvars')\n table_a=find_node_table(answer_list)\n ac_list=compose_time_list_ans(a_list)\n \n pickle.dump((ac_list,table_a),open(odir,'wb'))\ndef load(dir):\n return pickle.load((open(dir,'rb')))\n\ndef main():\n\n def parse_arg():\n import argparse\n parser = argparse.ArgumentParser(description='compress images into pickle object')\n parser.add_argument('-a', dest='dir', type=str,\n help='data directory')\n parser.add_argument('-s', dest='dir2', type=str,\n help='data directory')\n return parser.parse_args() \n args = parse_arg()\n \n #answer_list=readfunction(args.dir)\n submit_list=readfunction(args.dir2)\n ans=load(args.dir)\n \n \"\"\"\n submit_list=readfunction('C:/Users/CMH/Desktop/Eight_Seven3m.vcd')\n ans=load('C:/Users/CMH/Desktop/Eight_Seven3')\n \"\"\"\n\n #a_list=find_anchor(answer_list,'$dumpvars')\n s_list=find_anchor(submit_list,'$dumpvars')\n\n #table_a=find_node_table(answer_list)\n table_s=find_node_table(submit_list)\n\n table_r=resolve_node_table(ans[1],table_s)\n if( not table_r ):\n print(\"signal names mismatch\")\n sys.exit(1)\n return\n #ac_list=compose_time_list_ans(a_list)\n sc_list=compose_time_list(s_list,table_r)\n if(compare_list(ans[0],sc_list)): sys.exit(0)\n sys.exit(1)\n\nif __name__ == \"__main__\":\n main()\n #print(list1)","sub_path":"WaveParse/WaveParse.py","file_name":"WaveParse.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"20895021","text":"import os\nimport csv\n\nbudget_data = os.path.join(\"Resources\", \"budget_data.csv\")\n\nwith open(budget_data, newline=\"\") as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\",\")\n csv_header = next(csvfile)\n # skip header row\n #print(\"Header: \", str(csv_header))\n #The total number of months included in the dataset\n print(\"Financial Analysis\")\n print(\"---------------------\")\n months = []\n Total = []\n for rows in csvreader:\n months.append(rows[0])\n Total.append(int(rows[1]))\n print(\"Total months: \", str(len(months)))\n print(\"Total: $\" + str(sum(Total)))\n #Total = []\n #for rows in csvreader:\n # Total.append(int(rows[1]))\n #print(print(\"Total: $\" + str(sum(Total))))\n\n #The average of the changes in \"Profit/Losses\" over the entire period\n #find the montly changes\n monthly_change = []\n prev_month = 0\n for r in range(len(Total)):\n if(r == 0):\n prev_month = Total[r]\n else:\n monthly_difference = Total[r] - prev_month\n monthly_change.append(monthly_difference)\n prev_month = Total[r]\n\n #calculate the Average\n length_montly_change = len(monthly_change)\n sum_of_montly_change = sum(monthly_change)\n total_average = sum_of_montly_change / length_montly_change\n formatted_total_average = \"{:.2f}\".format(total_average)\n\n 
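# --- Added example (not part of the dataset records above/below) ---
# Equivalent, more idiomatic sketch of the month-over-month change logic the
# budget script above builds with an explicit prev_month variable: pair each
# value with its successor. monthly_changes is an illustrative name only.
def monthly_changes(totals):
    return [curr - prev for prev, curr in zip(totals, totals[1:])]

changes = monthly_changes([100, 120, 90])
assert changes == [20, -30]
assert sum(changes) / len(changes) == -5.0  # the average computed above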
print(\"Average of changes in Profit/Losses: $\", str(formatted_total_average))\n\n #The greatest increase in profits (date and amount) over the entire period\n greatest_increase_rev = max(monthly_change)\n greatest_decrease_rev = min(monthly_change)\n greatest_increase_month = str(months[monthly_change.index(greatest_increase_rev)+1])\n greatest_decrease_month = str(months[monthly_change.index(greatest_decrease_rev)+1])\n\n print(\"Greatest increase in profits: \", greatest_increase_month, \" $\", str(greatest_increase_rev))\n print(\"Greatest decrease in profits: \", greatest_decrease_month, \" $\", str(greatest_decrease_rev))\n\n\n output_file = open(\"output.txt\", \"w\")\n newline1 = \"Financial Analysis\"\n newline2 = \"---------------------\"\n #print(\"Total months: \", str(len(months)))\n #print(\"Total: $\" + str(sum(Total)))\n newline3 = \"Total months: \" + str(len(months))\n newline4 = \"Total: $\" + str(sum(Total))\n #print(\"Average of changes in Profit/Losses: $\", str(formatted_total_average))\n newline5 = \"Average of changes in Profit/Losses: $\" + str(formatted_total_average)\n newline6 = \"Greatest increase in profits: \" + greatest_increase_month + \" $\" + str(greatest_increase_rev)\n newline7 = \"Greatest decrease in profits: \" + greatest_decrease_month + \" $\" + str(greatest_decrease_rev)\n #{}\\n breaks to new line\n output_file.write('{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n'.format(newline1,newline2,newline3, newline4,newline5,newline6,newline7))\n","sub_path":"PyBank/pybank.py","file_name":"pybank.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"395480960","text":"# Copyright 2015 Huawei Technologies Co., Ltd.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nTricircle base exception handling.\n\"\"\"\n\nimport six\n\nfrom neutron_lib import exceptions\nfrom oslo_log import log as logging\n\nfrom tricircle.common.i18n import _\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass TricircleException(Exception):\n \"\"\"Base Tricircle Exception.\n\n To correctly use this class, inherit from it and define\n a 'message' property. 
That message will get printf'd\n with the keyword arguments provided to the constructor.\n \"\"\"\n message = _(\"An unknown exception occurred.\")\n code = 500\n headers = {}\n safe = False\n\n def __init__(self, message=None, **kwargs):\n\n self.kwargs = kwargs\n self.kwargs['message'] = message\n\n if 'code' not in self.kwargs:\n self.kwargs['code'] = self.code\n\n for k, v in self.kwargs.items():\n if isinstance(v, Exception):\n self.kwargs[k] = six.text_type(v)\n\n if self._should_format():\n try:\n message = self.message % kwargs\n except Exception:\n\n # kwargs doesn't match a variable in the message\n # log the issue and the kwargs\n exc_info = _('Exception class %s in string '\n 'format operation') % type(self).__name__\n format_str = _('%(exception_info)s ; %(format_key)s : '\n '%(format_value)s')\n for name, value in kwargs.items():\n exc_info = format_str % {\n 'exception_info': exc_info,\n 'format_key': name,\n 'format_value': six.text_type(value)}\n\n exc_info = _('%(message)s ; %(exception_info)s') % {\n 'message': self.message, 'exception_info': exc_info}\n LOG.exception(exc_info)\n\n # no rerasie\n # exc_info = sys.exc_info()\n # if CONF.fatal_exception_format_errors:\n # six.reraise(*exc_info)\n\n # at least get the core message out if something happened\n message = self.message\n\n elif isinstance(message, Exception):\n message = six.text_type(message)\n\n self.msg = message\n super(TricircleException, self).__init__(message)\n\n def _should_format(self):\n\n if self.kwargs['message'] is None and '%(message)' in self.message:\n LOG.error('\\%(message)s in message '\n 'but init parameter is None')\n\n return self.kwargs['message'] is None or '%(message)' in self.message\n\n def __unicode__(self):\n return six.text_type(self.msg)\n\n\nclass BadRequest(TricircleException):\n message = _('Bad %(resource)s request: %(msg)s')\n\n\nclass NotFound(TricircleException):\n message = _(\"Resource could not be found.\")\n code = 404\n safe = True\n\n\nclass Conflict(TricircleException):\n pass\n\n\nclass NotAuthorized(TricircleException):\n message = _(\"Not authorized.\")\n\n\nclass ServiceUnavailable(TricircleException):\n message = _(\"The service is unavailable\")\n\n\nclass AdminRequired(NotAuthorized):\n message = _(\"User does not have admin privileges\")\n\n\nclass PolicyNotAuthorized(NotAuthorized):\n message = _(\"Policy doesn't allow this operation to be performed.\")\n\n\nclass InUse(TricircleException):\n message = _(\"The resource is inuse\")\n\n\nclass InvalidConfigurationOption(TricircleException):\n message = _(\"An invalid value was provided for %(opt_name)s: \"\n \"%(opt_value)s\")\n\n\nclass EndpointNotAvailable(TricircleException):\n message = \"Endpoint %(url)s for %(service)s is not available\"\n\n def __init__(self, service, url):\n super(EndpointNotAvailable, self).__init__(service=service, url=url)\n\n\nclass EndpointNotUnique(TricircleException):\n message = \"Endpoint for %(service)s in %(pod)s not unique\"\n\n def __init__(self, pod, service):\n super(EndpointNotUnique, self).__init__(pod=pod, service=service)\n\n\nclass EndpointNotFound(TricircleException):\n message = \"Endpoint for %(service)s in %(pod)s not found\"\n\n def __init__(self, pod, service):\n super(EndpointNotFound, self).__init__(pod=pod, service=service)\n\n\nclass ResourceNotFound(TricircleException):\n message = \"Could not find %(resource_type)s: %(unique_key)s\"\n\n def __init__(self, model, unique_key):\n resource_type = model.__name__.lower()\n super(ResourceNotFound, 
self).__init__(resource_type=resource_type,\n                                                unique_key=unique_key)\n\n\nclass ResourceNotSupported(TricircleException):\n    message = \"%(method)s method not supported for %(resource)s\"\n\n    def __init__(self, resource, method):\n        super(ResourceNotSupported, self).__init__(resource=resource,\n                                                   method=method)\n\n\nclass Invalid(TricircleException):\n    message = _(\"Unacceptable parameters.\")\n    code = 400\n\n\nclass InvalidInput(Invalid):\n    message = _(\"Invalid input received: %(reason)s\")\n\n\nclass ExternalNetPodNotSpecify(TricircleException):\n    message = \"Pod for external network not specified\"\n\n    def __init__(self):\n        super(ExternalNetPodNotSpecify, self).__init__()\n\n\nclass PodNotFound(NotFound):\n    message = \"Pod %(region_name)s could not be found.\"\n\n    def __init__(self, region_name):\n        super(PodNotFound, self).__init__(region_name=region_name)\n\n\n# parameter validation error\nclass ValidationError(TricircleException):\n    message = _(\"%(msg)s\")\n    code = 400\n\n\n# parameter validation error\nclass HTTPForbiddenError(TricircleException):\n    message = _(\"%(msg)s\")\n    code = 403\n\n\nclass Duplicate(TricircleException):\n    pass\n\n\nclass ServerMappingsNotFound(NotFound):\n    message = _('Instance %(server_id)s could not be found.')\n\n\nclass VolumeMappingsNotFound(NotFound):\n    message = _('Volume %(volume_id)s could not be found')\n\n\nclass RoutingCreateFail(TricircleException):\n    message = _(\"Fail to create routing entry %(_type)s\")\n\n    def __init__(self, _type):\n        super(RoutingCreateFail, self).__init__(_type=_type)\n\n\nclass RoutingBindFail(TricircleException):\n    message = _(\"Fail to bind top and bottom %(_type)s\")\n\n    def __init__(self, _type):\n        super(RoutingBindFail, self).__init__(_type=_type)\n\n\nclass RouterNetworkLocationMismatch(exceptions.InvalidInput):\n    message = _(\"router located in %(router_az_hint)s, but network located \"\n                \"in %(net_az_hints)s, location mismatch.\")\n\n    def __init__(self, router_az_hints, net_az_hints):\n        super(RouterNetworkLocationMismatch, self).__init__(\n            router_az_hint=router_az_hints, net_az_hints=net_az_hints)\n\n\nclass ResourceIsInDeleting(TricircleException):\n    message = 'resource is being deleted now'\n    code = 204\n","sub_path":"tricircle/common/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":7425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"3297393","text":"# program that prints the pairs of amicable numbers smaller than n\n\ndef szukanie_dzielnikow(a):\n    dzielniki = []\n    for i in range(1,a):\n        if not a%i:\n            dzielniki.append(i)\n    return dzielniki\n\ndef sumowanie_dzielników(a):\n    dzielniki = szukanie_dzielnikow(a)\n    suma = 0\n    for i in range(len(dzielniki)):\n        suma += dzielniki[i]\n    return suma\n\nn = int(input(\"Enter the number n: \"))\n\nprint(\"amicable numbers:\")\nfor k in range(1,n):\n    suma_k = sumowanie_dzielników(k)\n    suma_j = sumowanie_dzielników(suma_k)\n    if k == suma_j:\n        if not k == suma_k:\n            print(k, suma_k)\n\nprint(\"perfect numbers:\")\nfor k in range(1,n):\n    if sumowanie_dzielników(k) == k:\n        print(k)","sub_path":"Kolokwium/liczby_zaprzyjaźnione.py","file_name":"liczby_zaprzyjaźnione.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"146584903","text":"\nfrom django import forms\nfrom django.forms import models\nfrom mezzanine.blog.models import Post, Comment\n\n\nclass CommentForm(models.ModelForm):\n    \"\"\"\n    Model form for ``Comment`` 
against a ``BlogPost``.\n    \"\"\"\n\n    class Meta:\n        model = Comment\n        fields = (\"name\", \"email\", \"website\", \"body\",)\n\n\nclass PostForm(models.ModelForm):\n    \"\"\"\n    Model form for ``BlogPost`` that provides the quick blog panel in the\n    admin dashboard.\n    \"\"\"\n\n    class Meta:\n        model = Post\n        fields = (\"title\", \"content\", \"status\")\n\n    def __init__(self, *args, **kwargs):\n        super(PostForm, self).__init__(*args, **kwargs)\n        self.fields[\"status\"].widget = forms.HiddenInput()\n","sub_path":"mezzanine/blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"127601257","text":"print(\"Welcome to PM Bank\")\r\nrestart = ('y')\r\nchances = 3\r\nBalance = 51000\r\nwhile chances >=0:\r\n    pin = int(input(\"Please enter your pin: \"))\r\n    if pin == 1234:\r\n        print(\"you entered your pin correctly\\n\")\r\n        while restart not in ('n', 'N', 'NO', 'no'):\r\n            print(\"press 1 for your balance\\n\")\r\n            print(\"press 2 for withdrawal\\n\")\r\n            print(\"press 3 for deposit\\n\")\r\n            print(\"press 4 for return card\\n\")\r\n            option = int(input(\"what would you like to choose? \"))\r\n            if option == 1:\r\n                print(\"your account balance is Rs.\", Balance, '\\n')\r\n                restart = input(\"would you like to go back?\\n\")\r\n                if restart in ('n', 'N', 'NO', 'no'):\r\n                    print(\"Thank you\")\r\n                    break\r\n            elif option == 2:\r\n                withdrawal = float(input('please enter the amount? \\n100,500,2000: '))\r\n                if withdrawal <= Balance:\r\n                    Balance = Balance - withdrawal  # write the new balance back\r\n                    print(\"your account balance is\", Balance)\r\n                    restart = input('would you like to go back? ')\r\n                    if restart in ('n', 'N', 'NO', 'no'):\r\n                        print(\"Thank you\")\r\n                        break\r\n                elif withdrawal > Balance:\r\n                    print(\"Insufficient Balance\\n\")\r\n                    break\r\n            elif option == 3:\r\n                deposit = float(input(\"Please enter the amount you want to deposit\\n\"))\r\n                Balance = Balance + deposit  # write the new balance back\r\n                print(\"your account balance is now\", Balance)\r\n                restart = input('would you like to go back? ')\r\n                if restart in ('n', 'N', 'NO', 'no'):\r\n                    print(\"Thank you\")\r\n                    break\r\n            elif option == 4:\r\n                print(\"Please wait\")\r\n                print(\"Thank you, visit again\")\r\n                break\r\n            else:\r\n                print(\"\\nPlease select a correct option\\n\")\r\n                restart = ('y')\r\n    elif pin != 1234:\r\n        chances = chances - 1\r\n        if chances == 1:\r\n            print(\"Warning: last chance; if the pin is incorrect your card will be blocked for 24 hours\")\r\n        if chances == 0:\r\n            print(\"\\nsorry, you have exceeded the limit, try after 24 hours\\n\")\r\n            break","sub_path":"ATM code.py","file_name":"ATM code.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"311087185","text":"'''\nExercise 2: Hello\n\nStephenson, B., 2014. The Python workbook A Brief\nIntroduction with Exercises and Solutions. 
New York\nDordrecht London: Springer.\n'''\n\ndef main():\n name = \"\"\n print(\"Please, Input your name: \")\n name = input()\n print(\"Hello\",name)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Exercises/Exercise_2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"312161684","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport logging\nimport configparser\n\nlog = logging.getLogger(__name__)\n\n\nclass Conf:\n def __init__(self):\n \"\"\"\n [default]\n api_key = ...\n url = ...\n log_file = ...\n \"\"\"\n self.conf = configparser.ConfigParser()\n self.conf.read(os.path.join(os.getenv('HOME'), '.notifyme.conf'))\n\n def __getattr__(self, item):\n try:\n return self.conf.get(section='default', option=item)\n except configparser.NoOptionError:\n return None\n except configparser.NoSectionError:\n return None\n\n\ndef logging_init(debug=False, write_log=False):\n if debug:\n lvl = 'DEBUG'\n else:\n lvl = 'WARNING'\n\n logging.getLogger().setLevel(logging.NOTSET)\n fmt = logging.Formatter(\n '%(asctime)s : %(name)s : %(levelname)s : %(message)s')\n conf = Conf()\n\n if conf.log_file and write_log:\n fh = logging.FileHandler(conf.log_file)\n fh.setLevel(level='DEBUG')\n fh.setFormatter(fmt)\n logging.getLogger().addHandler(fh)\n\n ch = logging.StreamHandler()\n ch.setLevel(level=lvl)\n ch.setFormatter(fmt)\n logging.getLogger().addHandler(ch)\n","sub_path":"notifymetools/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"247309343","text":"import rltk\n\nb1 = rltk.BlockArrayWriter()\nb1.write('1', 'a')\nb1.write('2', 'b')\nb1.write('2', 'c')\nb1.close()\nb1 = rltk.BlockArrayReader(b1.get_handler())\nprint('--- block1 ---')\nfor bb in b1:\n print(bb)\n\nb2 = rltk.BlockArrayWriter()\nb2.write('1', 'a')\nb2.write('1', 'd')\nb2.write('2', 'c')\nb2.close()\nb2 = rltk.BlockArrayReader(b2.get_handler())\nprint('--- block2 ---')\nfor bb in b2:\n print(bb)\n\nb3 = rltk.BlockArrayWriter()\nrltk.BlockingHelper(reader1=b1, reader2=b2).union(writer=b3)\nb3 = rltk.BlockArrayReader(b3.get_handler())\nprint('--- union ---')\nfor bb in b3:\n print(bb)","sub_path":"examples/blocking/block_operations.py","file_name":"block_operations.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"366702102","text":"#!usr/bin/env python\n# _*_ coding:utf-8 _*_\n\n\"\"\"\n@author:chaowei\n@file: regression_example.py\n@time: 2019/09/16\n\"\"\"\n\nfrom __future__ import print_function\n\n# from caffe2.python import core, cnn, net_drawer, workspace, visualize\nfrom caffe2.python import core, cnn, net_drawer, workspace\nimport numpy as np\n# from IPython import display\n# from matplotlib import pyplot\n\n# ------------------------------------------------\n\"\"\"create init net.\n\"\"\"\ninit_net = core.Net(\"init\")\nW_gt = init_net.GivenTensorFill([], \"W_gt\", shape=[1, 2], values=[2.0, 1.5]) # the ground true parameters.\nB_gt = init_net.GivenTensorFill([], \"B_gt\", shape=[1], values=[0.5])\n\n# Constant value ONE is used in weighted sum when updating parameters.\nONE = init_net.ConstantFill([], \"ONE\", shape=[1], value=1.)\n\n# ITER is the iterator count.\nITER = init_net.ConstantFill([], \"ITER\", shape=[1], value=0, dtype=core.DataType.INT32)\n\n# For the 
parameters to be learned: we randomly initialize weight\n# from [-1, 1] and init bias with 0.0.\nW = init_net.UniformFill([], \"W\", shape=[1, 2], min=-1., max=1.)\nB = init_net.ConstantFill([], \"B\", shape=[1], value=0.0)\nprint('Created init net.')\n# --------------------------------------------------------------\n\n# --------------------------------------------------------------\n\"\"\"\ncreate train net.\n\"\"\"\ntrain_net = core.Net(\"train\")\n# First, we generate random samples of X and create the ground truth.\nX = train_net.GaussianFill([], \"X\", shape=[64, 2], mean=0.0, std=1.0, run_once=0)\nY_gt = X.FC([W_gt, B_gt], \"Y_gt\")\n# We add Gaussian noise to the ground truth\nnoise = train_net.GaussianFill([], \"noise\", shape=[64, 1], mean=0.0, std=1.0, run_once=0)\nY_noise = Y_gt.Add(noise, \"Y_noise\")\n\n# Note that we do not need to propagate the gradients back through Y_noise,\n# so we mark StopGradient to notify the auto differentiating algorithm\n# to ignore this path.\nY_noise = Y_noise.StopGradient([], \"Y_noise\")\n\nY_pred = X.FC([W, B], \"Y_pred\")\n\n# The loss function is computed by a squared L2 distance, and then averaged\n# over all items in the minibatch.\ndist = train_net.SquaredL2Distance([Y_noise, Y_pred], \"dist\")\nloss = dist.AveragedLoss([], [\"loss\"])\n\n# Get gradients for all the computations above.\ngradient_map = train_net.AddGradientOperators([loss])\n# graph = net_drawer.GetPydotGraph(train_net.Proto().op, \"train\", rankdir=\"LR\")\n# display.Image(graph.create_png(), width=800)\n\n# Increment the iteration by one.\ntrain_net.Iter(ITER, ITER)\n# Compute the learning rate that corresponds to the iteration.\nLR = train_net.LearningRate(ITER, \"LR\", base_lr=-0.1,\n policy=\"step\", stepsize=20, gamma=0.9)\n\n# Weighted sum\ntrain_net.WeightedSum([W, ONE, gradient_map[W], LR], W)\ntrain_net.WeightedSum([B, ONE, gradient_map[B], LR], B)\n\n# Let's show the graph again.\n# graph = net_drawer.GetPydotGraph(train_net.Proto().op, \"train\", rankdir=\"LR\")\n# display.Image(graph.create_png(), width=800)\n# --------------------------------------------------------------------------------------\n\n\"\"\"\ninitialize the net and create net in workspace.\n\"\"\"\nworkspace.RunNetOnce(init_net)\nworkspace.CreateNet(train_net)\nprint(\"Before training, W is: {}\".format(workspace.FetchBlob(\"W\")))\nprint(\"Before training, B is: {}\".format(workspace.FetchBlob(\"B\")))\n\n\"\"\"\ntrain the net.\n\"\"\"\n# run the train net 100 times\nfor i in range(10000):\n print(\"i=\", i)\n workspace.RunNet(train_net.Proto().name)\n\nprint(\"After training, W is: {}\".format(workspace.FetchBlob(\"W\")))\nprint(\"After training, B is: {}\".format(workspace.FetchBlob(\"B\")))\nprint(\"Ground truth W is: {}\".format(workspace.FetchBlob(\"W_gt\")))\nprint(\"Ground truth B is: {}\".format(workspace.FetchBlob(\"B_gt\")))\n\n\n\nif __name__ == '__main__':\n\n pass","sub_path":"normalTest/regression_example.py","file_name":"regression_example.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"94001065","text":"from ..utils.validation import check_equal_index\nfrom ..utils.transformations import tabularize\nfrom .series_to_series import RandomIntervalSegmenter\nfrom sklearn.utils.validation import check_is_fitted\nimport numpy as np\nimport pandas as pd\n\n__all__ = ['RandomIntervalFeatureExtractor']\n\n\nclass RandomIntervalFeatureExtractor(RandomIntervalSegmenter):\n \"\"\"\n 
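# --- Added example (not part of the dataset records above/below) ---
# Hedged NumPy sketch of the update the Caffe2 training net above expresses
# with WeightedSum([W, ONE, gradient_map[W], LR], W): the new W is
# 1.0 * W + LR * dW, and LR is negative (base_lr=-0.1), so the step descends
# the gradient. weighted_sum_update is an illustrative name only.
import numpy as np

def weighted_sum_update(param, grad, lr):
    return 1.0 * param + lr * grad

W = np.array([0.0, 0.0])
dW = np.array([1.0, -2.0])
print(weighted_sum_update(W, dW, -0.1))  # -> [-0.1  0.2]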
Transformer that segments time-series into random intervals\n and subsequently extracts series-to-primitives features from each interval.\n\n n_intervals: str{'sqrt', 'log', 'random'}, int or float, optional (default='sqrt')\n Number of random intervals to generate, where m is length of time series:\n - If \"log\", log of m is used.\n - If \"sqrt\", sqrt of m is used.\n - If \"random\", random number of intervals is generated.\n - If int, n_intervals intervals are generated.\n - If float, int(n_intervals * m) is used with n_intervals giving the fraction of intervals of the\n time series length.\n\n For all arguments relative to the length of the time series, the generated number of intervals is\n always at least 1.\n\n features: list of functions, optional (default=None)\n Applies each function to random intervals to extract features.\n If None, only the mean is extracted.\n\n random_state: int, RandomState instance, optional (default=None)\n - If int, random_state is the seed used by the random number generator;\n - If RandomState instance, random_state is the random number generator;\n - If None, the random number generator is the RandomState instance used\n by `np.random`.\n\n check_input: bool, optional (default=True)\n When set to ``True``, inputs will be validated, otherwise inputs are assumed to be valid\n and no checks are performed. Use with caution.\n \"\"\"\n\n def __init__(self, n_intervals='sqrt', min_length=None, features=None, random_state=None, check_input=True):\n super(RandomIntervalFeatureExtractor, self).__init__(\n n_intervals=n_intervals,\n min_length=min_length,\n random_state=random_state,\n check_input=check_input\n )\n\n # Check input of feature calculators, i.e. list of functions to be applied to time-series\n if features is None:\n self.features = [np.mean]\n elif isinstance(features, list) and all([callable(func) for func in features]):\n self.features = features\n else:\n raise ValueError('Features must be list containing only functions (callables) to be '\n 'applied to the data columns')\n\n def transform(self, X, y=None):\n \"\"\"\n Transform X, segments time-series in each column into random intervals using interval indices generated\n during `fit` and extracts features from each interval.\n\n Parameters\n ----------\n X : nested pandas.DataFrame of shape [n_samples, n_features]\n Nested dataframe with time-series in cells.\n\n Returns\n -------\n Xt : pandas.DataFrame\n Transformed pandas DataFrame with same number of rows and one column for each generated interval.\n \"\"\"\n\n # Check if fit had been called\n check_is_fitted(self, 'intervals_')\n\n # check inputs\n if self.check_input:\n # Check that the input is of the same shape as the one passed\n # during fit.\n if X.shape[1] != self.input_shape_[1]:\n raise ValueError('Number of columns of input is different from what was seen'\n 'in `fit`')\n # Input validation\n if not all([np.array_equal(fit_idx, trans_idx) for trans_idx, fit_idx in zip(check_equal_index(X),\n self.input_indexes_)]):\n raise ValueError('Indexes of input time-series are different from what was seen in `fit`')\n\n n_rows, n_cols = X.shape\n n_features = len(self.features)\n n_cols_intervals = sum([intervals.shape[0] for intervals in self.intervals_])\n\n # Compute features on intervals.\n Xt = np.zeros((n_rows, n_features * n_cols_intervals)) # Allocate output array for transformed data\n self.columns_ = []\n i = 0\n for c, (colname, col) in enumerate(X.items()):\n # Tabularize each column assuming series have equal indexes in any given column.\n # TODO generalise to non-equal-index cases\n arr = tabularize(col, return_array=True)\n for func in self.features:\n # TODO generalise to series-to-series functions and function kwargs\n for start, end in self.intervals_[c]:\n interval = arr[:, start:end]\n\n # Try to use optimised computations over axis if possible, otherwise iterate over rows.\n try:\n Xt[:, i] = func(interval, axis=1)\n except TypeError as e:\n if str(e) == f\"{func.__name__}() got an unexpected keyword argument 'axis'\":\n Xt[:, i] = np.apply_along_axis(func, 1, interval)\n else:\n raise\n i += 1\n self.columns_.append(f'{colname}_{start}_{end}_{func.__name__}')\n Xt = pd.DataFrame(Xt)\n Xt.columns = self.columns_\n return Xt\n","sub_path":"sktime/transformers/series_to_tabular.py","file_name":"series_to_tabular.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"287753458","text":"import json,os,flask\r\nfrom flask import request, jsonify\r\n\r\napp = flask.Flask(__name__)\r\napp.config[\"DEBUG\"] = True\r\n \r\n\r\nif os.path.exists(\"books.json\"):\r\n books = json.load(open(\"books.json\"))\r\nelse:\r\n print(\"file not found\") \r\n@app.route('/', methods=['GET'])\r\ndef home():\r\n return '''<h1>PROJET : api</h1>\r\n<p>Utilisation des méthodes GET, POST, PUT, DELETE</p>\r\n'''\r\n\r\n@app.route('/api/books', methods=['GET'])\r\ndef api_all():\r\n return jsonify(books)\r\n\r\n@app.route('/api/books/<int:id>', methods=['GET'])\r\ndef api_id(id):\r\n book = [book for book in books if book['id']==id]\r\n return jsonify({\"books\":book})\r\n\r\n@app.route('/api/books/delete/<int:id>',methods=['DELETE'])\r\ndef api_delete(id):\r\n book = [book for book in books if book['id']== id]\r\n books.remove(book[0])\r\n return jsonify({'books':books})\r\n\r\n@app.route('/api/books/add',methods=['POST'])\r\ndef post():\r\n user = request.get_json()\r\n user['id'] = len(books)+1\r\n books.append(user)\r\n return jsonify(user)\r\n\r\n@app.route('/api/books/update',methods=['PUT'])\r\ndef put():\r\n user = request.get_json()\r\n for i,u in enumerate(books):\r\n if u['id']==user['id']:\r\n books[i] = user \r\n return {}\r\n\r\n\r\napp.run()\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"136150068","text":"# -*- coding: utf-8 -*-\nimport qrcode\nimport os\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport cv2\nfrom pyzbar.pyzbar import decode\nimport random\nimport sys\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\nfrom IPython import get_ipython\nfrom IPython.display import display\n# CAMERA_PORT = 0\n\nasync def init_camera_settings(settings):\n # Stub: camera settings are currently applied inside decode_input_camera\n return None\n\nasync def make_qr_code(data, type):\n return qrcode.make(data)\n\nasync def save_qr_as_jpg(data, output_name):\n data.save(output_name)\n\nasync def load_qr_images_from_path(data_path):\n data = []\n path = os.path.join(data_path)\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img))\n data.append([img_array, img])\n except Exception as e:\n print (e)\n pass\n print(data)\n return data\n\nasync def decode_input(img):\n img_data = []\n for code in decode(img[0]):\n img_data.append([code.data.decode('utf-8'), code.type])\n print(img_data)\n\n\nasync def decode_input_camera(cam):\n img_data = []\n\n # with PiCamera() as camera:\n camera = PiCamera()\n camera.resolution = (1024, 768)\n camera.framerate = 60\n rawCapture = PiRGBArray(camera, size=(1024, 768))\n \n\n for frame in camera.capture_continuous(rawCapture, format=\"bgr\"):\n image = frame.array\n cv2.imshow('Testing-QR', image)\n for code in decode(image):\n img_data.append([code.data.decode('utf-8'), code.type])\n # Return as soon as at least one code was decoded in this frame\n if img_data:\n cv2.destroyAllWindows()\n camera.close()\n return img_data\n # Clear the stream so the next frame can be captured\n rawCapture.truncate(0)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n camera.close()\n break","sub_path":"QR_rasp_asyn.py","file_name":"QR_rasp_asyn.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
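The QR helpers in QR_rasp_asyn.py above depend on Raspberry Pi camera hardware, but the qrcode/pyzbar round trip they rely on can be sanity-checked without any hardware. A minimal sketch (assuming only the qrcode, Pillow and pyzbar packages are installed; the names here are illustrative, not part of the file above):

import io
import qrcode
from PIL import Image
from pyzbar.pyzbar import decode

def roundtrip(payload):
    # Render the payload to an in-memory PNG, then decode it straight back.
    buf = io.BytesIO()
    qrcode.make(payload).save(buf)
    buf.seek(0)
    img = Image.open(buf).convert('L')  # pyzbar works on grayscale PIL images
    return decode(img)[0].data.decode('utf-8')

if __name__ == '__main__':
    assert roundtrip('hello-qr') == 'hello-qr'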
+{"seq_id":"585229622","text":"\nfrom django.urls import path\n\nfrom . import views\n\nfrom django.contrib.auth.models import User, Group, Permission\nfrom rest_framework.serializers import ModelSerializer\n\n\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView, RetrieveUpdateDestroyAPIView\nfrom rest_framework.relations import PrimaryKeyRelatedField, SlugRelatedField\n\nclass GroupSerializer(ModelSerializer):\n class Meta:\n model = User\n fields = ('email', 'username', 'groups')\n\n\n#http://www.django-rest-framework.org/api-guide/relations/#primarykeyrelatedfield\n\n\n\nclass UserSerializer(ModelSerializer):\n # groups = PrimaryKeyRelatedField(queryset=Group.objects.all(), many=True)\n groups = SlugRelatedField(\n many=True,\n queryset=Group.objects.all(),\n slug_field='name',\n required=False\n )\n\n user_permissions = SlugRelatedField(\n many=True,\n queryset=Permission.objects.all(),\n slug_field='codename',\n required=False\n )\n\n\n class Meta:\n model = User\n fields = ('email', 'username', 'groups', 'user_permissions')\n\n\nclass UserList(ListAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n #permission_classes = (,)\n\n\nclass UserDetails(RetrieveUpdateDestroyAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\n\nurlpatterns = [\n path('', UserList.as_view(), name='user-list'),\n path('<int:pk>/', UserDetails.as_view(), name='user-details'),\n]","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"553697941","text":"#!/usr/bin/env python\n\"\"\"\nSpark script to parse and aggregate DBS and PhEDEx records on HDFS.\n\"\"\"\n\n# System modules\nimport os\nimport time\nimport datetime\n\n# Pyspark modules\nfrom pyspark import SparkContext, StorageLevel\nfrom pyspark.sql import HiveContext\nfrom pyspark.sql.functions import struct, array, udf, countDistinct\nfrom pyspark.sql.types import LongType, IntegerType, DoubleType, StringType, StructType, StructField\n\n# CMSSpark modules\nfrom CMSSpark.spark_utils import dbs_tables, phedex_tables, print_rows\nfrom CMSSpark.spark_utils import spark_context, split_dataset\nfrom CMSSpark.utils import info_save, bytes_to_readable\nfrom CMSSpark.conf import OptionParser\n\nAVERAGES_TIME_DATA_FILE = 'spark_exec_time_averages.txt'\n\ndef get_options():\n opts = OptionParser('averages')\n\n return opts.parser.parse_args()\n\ndef get_script_dir():\n return os.path.dirname(os.path.abspath(__file__))\n\ndef get_destination_dir():\n return '%s/../../../bash/report_averages' % get_script_dir()\n\ndef quiet_logs(sc):\n \"\"\"\n Sets logger's level to ERROR so INFO logs would not show up.\n \"\"\"\n logger = sc._jvm.org.apache.log4j\n logger.LogManager.getRootLogger().setLevel(logger.Level.ERROR)\n\ndef run(fout, yarn=None, verbose=None):\n \"\"\"\n Main function to run pyspark job. It requires a schema file, an HDFS directory\n with data and optional script with mapper/reducer functions.\n \"\"\"\n \n # define spark context, it's the main object which allows to communicate with spark\n ctx = spark_context('cms', yarn, verbose)\n\n quiet_logs(ctx)\n\n sqlContext = HiveContext(ctx)\n \n sqlContext.setConf(\"spark.sql.files.ignoreCorruptFiles\",\"true\")\n sqlContext.sql(\"set spark.sql.files.ignoreCorruptFiles=true\")\n\n # date, site, dataset, size, replica_date, groupid\n schema = StructType([\n StructField(\"date\", StringType(), True),\n StructField(\"site\", StringType(), True),\n StructField(\"dataset\", StringType(), True),\n StructField(\"size\", DoubleType(), True),\n StructField(\"replica_date\", StringType(), True),\n StructField(\"groupid\", StringType(), True)\n ])\n\n df = sqlContext.read.format('com.databricks.spark.csv')\\\n .options(treatEmptyValuesAsNulls='true', nullValue='null')\\\n .load('hdfs:///cms/phedex/*/*/*/part-*', schema=schema)\n # .load('hdfs:///cms/phedex/2017/03/*/part-00000', schema=schema)\n\n # Remove all tape sites\n is_tape = lambda site: site.endswith('_MSS') | site.endswith('_Buffer') | site.endswith('_Export')\n df = df.where(is_tape(df.site) == False)\n\n extract_campaign_udf = udf(lambda dataset: dataset.split('/')[2].split('-')[0])\n extract_tier_udf = udf(lambda dataset: dataset.split('/')[3])\n date_to_timestamp_udf = udf(lambda date: time.mktime(datetime.datetime.strptime(date, \"%Y%m%d\").timetuple()))\n timestamp_to_date_udf = udf(lambda timestamp: datetime.datetime.fromtimestamp(float(timestamp)).strftime('%Y%m%d'))\n days_delta_udf = udf(lambda t1, t2: (datetime.datetime.fromtimestamp(float(t1)) - datetime.datetime.fromtimestamp(float(t2))).days + 1)\n count_udf = udf(lambda list: len(list))\n\n df = df.withColumn('campaign', extract_campaign_udf(df.dataset))\n df = df.withColumn('tier', extract_tier_udf(df.dataset))\n df = df.withColumn('date_min', date_to_timestamp_udf(df.date))\n df = df.withColumn('date_max', date_to_timestamp_udf(df.date))\n df = df.withColumn('size_average', df.size)\n\n df = df.groupBy(['campaign', 'tier'])\\\n .agg({'date_min': 'min', 'date_max': 'max', 'date': 'collect_set', 'size_average': 'avg', 'size': 'max'})\\\n .withColumnRenamed('min(date_min)', 'date_min')\\\n .withColumnRenamed('max(date_max)', 'date_max')\\\n .withColumnRenamed('collect_set(date)', 'days_count')\\\n .withColumnRenamed('avg(size_average)', 'size_average')\\\n .withColumnRenamed('max(size)', 'size_max')\n\n df = df.withColumn('period_days', days_delta_udf(df.date_max, df.date_min))\\\n .withColumn('days_count', count_udf(df.days_count))\\\n .withColumn('date_min', timestamp_to_date_udf(df.date_min))\\\n .withColumn('date_max', timestamp_to_date_udf(df.date_max))\n \n df = df.withColumn('existence_in_period', df.days_count / df.period_days)\n df = df.withColumn('average_size_in_period', df.size_average * df.existence_in_period)\n\n # campaign, tier, date_max, date_min, days_count, size_max, size_average, period_days, existence_in_period, average_size_in_period\n\n # write out results back to HDFS, the fout parameter defines area on HDFS\n # it is either absolute path or area under /user/USERNAME\n if fout:\n df.write.format(\"com.databricks.spark.csv\")\\\n .option(\"header\", \"true\").save(fout)\n \n ctx.stop()\n\n@info_save('%s/%s' % (get_destination_dir(), AVERAGES_TIME_DATA_FILE))\ndef main():\n \"Main function\"\n opts = get_options()\n print(\"Input arguments: %s\" % opts)\n \n fout = opts.fout\n verbose = opts.verbose\n yarn = opts.yarn\n \n run(fout, yarn, verbose)\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/python/CMSSpark/reports/aggregate_averages.py","file_name":"aggregate_averages.py","file_ext":"py","file_size_in_byte":5187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
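The campaign/tier UDFs in aggregate_averages.py above are plain string splits over CMS dataset paths of the form /primary/processing/tier, so the parsing logic can be checked without a Spark session. A self-contained sketch (the sample path is made up for illustration):

def extract_campaign(dataset):
    # /<primary>/<campaign>-<rest>/<tier>: campaign is the prefix before the first dash
    return dataset.split('/')[2].split('-')[0]

def extract_tier(dataset):
    return dataset.split('/')[3]

if __name__ == '__main__':
    sample = '/SingleMuon/Run2017C-PromptReco-v1/AOD'  # hypothetical dataset name
    assert extract_campaign(sample) == 'Run2017C'
    assert extract_tier(sample) == 'AOD'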
+{"seq_id":"588634469","text":"import streamlit as st\nfrom requests_toolbelt.multipart.encoder import MultipartEncoder\nimport requests\nfrom PIL import Image\nimport io\nimport os\n\n# st.title('Image Captioning')\nst.markdown(\"<h1>Image Captioning</h1>\", unsafe_allow_html=True)\n\n\n# fastapi endpoint\nurl = os.environ.get(\"BACKEND_URL\",'http://localhost:5000')\nendpoint = '/upload'\n\n# st.write('''Caption for the uploaded image can be generated here.''') # description and instructions\nst.markdown(\"<p>Caption for the uploaded image can be generated here</p>\", unsafe_allow_html=True)\n\nimage = st.file_uploader('') # image upload widget\n\n\ndef process(image, server_url):\n\n m = MultipartEncoder(\n fields={'file': ('filename', image, 'image/jpeg')}\n )\n\n r = requests.post(server_url,\n data=m,\n headers={'Content-Type': m.content_type},\n timeout=5000)\n\n return r\n\nif image is None:\n # st.error(\"Insert an image!\") # handle case with no image\n st.markdown(\"<p>Insert an image!</p>\", unsafe_allow_html=True)\n\nelse:\n caption = process(image, url+endpoint)\n st.image([image], width=700)\n st.markdown(f\"<p>{caption.json().get('caption')}</p>\", unsafe_allow_html=True)\n\n\n# col1, col2, col3 = st.columns(3)\n# col1.text('')\n# if col2.button('Get Caption'):\n \n# if image == None:\n# st.error(\"Insert an image!\") # handle case with no image\n# else:\n# caption = process(image, url+endpoint)\n# st.image([image], width=700)\n# # st.write(caption.json().get('caption')) # output dyptich\n# st.markdown(f\"<p>{caption.json().get('caption')}</p>\", unsafe_allow_html=True)\n ","sub_path":"ui/webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
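The Streamlit UI in webapp.py above posts a multipart upload to BACKEND_URL + /upload and expects a JSON body with a 'caption' key. The real backend is not part of this file; a minimal FastAPI counterpart consistent with that contract would look roughly like this (endpoint shape and the 'file' field name are inferred from the frontend, and the captioning model is stubbed out):

from fastapi import FastAPI, File, UploadFile

app = FastAPI()

@app.post('/upload')
async def upload(file: UploadFile = File(...)):
    data = await file.read()  # raw image bytes sent by the MultipartEncoder
    # A real service would run the captioning model here; return a stub instead.
    return {'caption': f'received {len(data)} bytes'}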
+{"seq_id":"33256635","text":"import cv2\nimport numpy as np\nimport graph_3 as g3\nNO_OP = 1000000000\n\n\ndef calculate_energy(dsi, max_d):\n energy = np.ones_like(dsi) * NO_OP\n w = dsi.shape[0]\n energy[w-1, :] = np.copy(dsi[w-1, :])\n energy[:, w-1] = np.copy(dsi[:, w-1])\n\n for i in range(w-2, -1, -1):\n for j in range(i, i-max_d, -1):\n if j < 0:\n continue\n right = 100000000\n bottom = 100000000\n bottom_right = 100000000\n if energy[i, j+1] != NO_OP:\n right = energy[i, j+1]\n if energy[i+1, j] != NO_OP:\n bottom = energy[i+1, j]\n if energy[i+1, j+1] != NO_OP:\n bottom_right = energy[i+1, j+1]\n value = np.min([right, bottom, bottom_right])\n energy[i, j] = value + dsi[i, j]\n\n energy[w-1, 0:w-max_d] = NO_OP\n return energy\n\n\ndef extract_path(energy):\n w = energy.shape[0]\n line_disparity = np.zeros(w)\n j = 0\n i = np.argmin(energy[:, j])\n line_disparity[j] = i - j\n\n is_done = False\n while not is_done:\n right = 100000000\n bottom = 100000000\n bottom_right = 100000000\n # print(j, i)\n if energy[i, j+1] != NO_OP:\n right = energy[i, j+1]\n if energy[i+1, j] != NO_OP:\n bottom = energy[i+1, j]\n if energy[i+1, j+1] != NO_OP:\n bottom_right = energy[i+1, j+1]\n arg_min = np.argmin([right, bottom, bottom_right])\n if arg_min == 2 and j >= 0:\n line_disparity[j] = j - i\n j = j + 1\n i = i + 1\n pass\n elif arg_min == 1 and j >= 0:\n i = i + 1\n pass\n else:\n line_disparity[j] = j - i\n j = j + 1\n pass\n\n if i >= w-1:\n break\n\n return line_disparity*-1\n\n\ndef calculate_dsi(left_scan_line, right_scan_line, w):\n dsi = np.zeros((w, w))\n\n right_scan_line = np.expand_dims(right_scan_line, axis=1)\n dsi += right_scan_line\n dsi = np.abs(dsi - left_scan_line)\n return dsi\n\n\ndef pairwise_stereo_dp(left, right, max_d):\n h = left.shape[0]\n w = left.shape[1]\n\n f = np.zeros((h,w))\n for i in range(h):\n left_scan_line = left[i, :]\n right_scan_line = right[i, :]\n dsi = calculate_dsi(left_scan_line, right_scan_line, w)\n energy = calculate_energy(dsi, max_d)\n path = extract_path(energy)\n f[i, :] = path\n # cv2.imwrite(\"output/dsi.png\", dsi + 255 - dsi.max())\n # print(i)\n\n return f\n\n\ndef pairwise_stereo_ssd(left, right, t_size, max_d):\n h = left.shape[0]\n w = left.shape[1]\n right_expanded = cv2.copyMakeBorder(np.copy(right), t_size, t_size, t_size, t_size, cv2.BORDER_REFLECT)\n left_expanded = cv2.copyMakeBorder(np.copy(left), t_size, t_size, t_size, t_size, cv2.BORDER_REFLECT)\n raw_disparity_map = np.zeros((h, w), dtype=float)\n\n for i in range(h):\n for j in range(t_size, w + t_size):\n left_window = left_expanded[i:i+t_size, j-t_size:j+t_size]\n right_strip = right_expanded[i:i+t_size, :]\n min_ssd = 1000000000\n min_arg = j\n for k in range(j, j + max_d):\n l = k - t_size\n r = k + t_size\n\n if l < 0 or r > w + 2*t_size:\n continue\n right_window = right_strip[:, l:r]\n ssd = np.sum(np.abs(left_window - right_window) ** 2)\n if ssd < min_ssd:\n min_ssd = ssd\n min_arg = k\n\n raw_disparity_map[i, j - t_size] = min_arg - j\n\n # for i in range(h):\n # for j in range(t_size, w + t_size):\n # right_window = right_expanded[i:i+t_size, j-t_size:j+t_size]\n # left_strip = left_expanded[i:i+t_size, :]\n # error_map = cv2.matchTemplate(left_strip, right_window, method=cv2.TM_SQDIFF)\n # error_map = np.squeeze(error_map)\n # error_map[j:w+1] = error_map.max()\n # q = np.where(error_map == error_map.min())\n # if q[0].shape[0] > 1:\n # print()\n # min_arg = np.argmin(error_map)\n # if j - min_arg < 0:\n # print()\n # raw_disparity_map[i, j - t_size] = j - min_arg - t_size\n\n return raw_disparity_map\n\n\ndef pairwise_stereo_graph_cut(left, right, labels, lambda_v, d_thresh, K, full_n, reverse):\n stereo_algorithm = g3.AlphaExpansion(left, right, labels, lambda_v=lambda_v,\n d_thresh=d_thresh, K=K, full_n=full_n, reverse=reverse)\n\n f = stereo_algorithm.calculate_disparity_map()\n\n return f\n\n","sub_path":"stereo_correspondence/stereo.py","file_name":"stereo.py","file_ext":"py","file_size_in_byte":4684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"380432292","text":"import sys\nfrom math import *\nfrom src.Container import Container\nfrom src.Game import Game\nfrom src.PWMDriver import PWMDriver\nfrom src.MotorizedPlatform import MotorizedPlatform\nfrom src.Navigation import Navigation\nfrom src.PositionWatcher import PositionWatcher\nfrom src.Lidar import Lidar\nfrom src.Switches import Switches\nfrom src.WebSocketServer import WebSocketServer\nfrom src.CommandsManager import CommandsManager\nfrom src.ArduinoManager import ArduinoManager\nfrom src.Scripts import Scripts\n#from src.Elevator import Elevator\nfrom src.Claw import Claw\nfrom time import sleep\nfrom src.Logger import LoggerManager\n\nif __name__ == '__main__':\n container = Container()\n \n logger = LoggerManager()\n logger.setLevel('debug')\n container.set('logger', logger)\n root = logger.get('Root')\n\n ws = WebSocketServer(container)\n container.set('websocket', ws)\n\n # arduinoManager = ArduinoManager(container)\n # arduinoManager.identify()\n # container.set('arduinoManager', arduinoManager)\n \n scripts = Scripts(container)\n container.set('scripts', scripts)\n\n game = Game(container)\n container.set('game', game)\n \n commandsManager = CommandsManager(container)\n container.set('commandsManager', commandsManager)\n\n positionWatcher = PositionWatcher(container)\n #positionWatcher.start()\n container.set('positionWatcher', positionWatcher)\n\n # switches = Switches(container)\n # container.set('switches', switches)\n\n driver = PWMDriver()\n container.set('driver', driver)\n\n platform = MotorizedPlatform(container)\n container.set('platform', platform)\n\n navigation = Navigation(container)\n container.set('navigation', navigation)\n \n # rightClaw = Claw(container, {'elevator': 7, 'claws': [6, 5, 4]})\n # container.set('rightClaw', rightClaw)\n \n # elevator = Elevator(container)\n # container.set('elevator', elevator)\n\n lidar = Lidar(container)\n container.set('lidar', lidar)\n \n commandsManager.init()\n\n def onPos(x, y, t):\n ws.sendData('mainPosition', [x, y, t])\n\n positionWatcher.setPositionChangedHandler(onPos)\n\n def app():\n #switches.start()\n ws.start()\n lidar.start()\n sleep(1)\n platform.stop()\n sleep(1)\n positionWatcher.reset()\n positionWatcher.start()\n \n root.info('App ready')\n # sleep(1)\n # navigation.goTo({'x':600, 'y':600, 'orientation':pi })\n # input('You confirm?')\n # navigation.goTo({ 'x': 979, 'y': 1500, 'orientation': pi, 'speed': 40 })\n while True:\n sleep(100)\n\n try:\n app()\n except KeyboardInterrupt:\n print('')\n root.error('KeyboardInterrupt: App will shutdown in a few moments')\n #switches.stop()\n scripts.stop()\n navigation.stop()\n positionWatcher.stop()\n platform.stop()\n #elevator.stop()\n ws.stop()\n platform.stop()\n sys.exit()\n\n","sub_path":"python/test_detection.py","file_name":"test_detection.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"610995553","text":"from services.redis_connection import r\r\nfrom services.neo4j_connection import g\r\n\r\ndef register(username):\r\n user_key = f\"user:{username}\"\r\n r.sadd('users', username)\r\n user_exists = r.exists(user_key)\r\n if user_exists:\r\n return\r\n r.hmset(user_key, {\r\n 'username': username,\r\n 'created-count': 0,\r\n 'in-queue-count': 0,\r\n 'spam-checking-count': 0,\r\n 'spam-count': 0,\r\n 'sent-count': 0,\r\n 'delivered-count': 0,\r\n 'total-count': 0,\r\n })\r\n r.publish('login', username)\r\n g.register_user(username)\r\n\r\n\r\ndef logout(username):\r\n r.publish('logout', username)\r\n r.srem('users', username)\r\n\r\n\r\ndef get_users_online():\r\n return r.smembers('users')\r\n\r\n\r\ndef get_most_active_senders():\r\n top_senders_count = 10\r\n return r.zrange('sent-count', 0, top_senders_count, desc=True, withscores=True)\r\n\r\n\r\ndef get_most_active_spammers():\r\n top_spammers_count = 10\r\n return r.zrange('spam-count', 0, top_spammers_count, desc=True, withscores=True)\r\n\r\n","sub_path":"term2/lab3/services/users_service.py","file_name":"users_service.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"583231555","text":"import urllib2\nfrom bs4 import BeautifulSoup\nimport datetime\n\nclass Cheese:\n def __init__(self, irc):\n self.day = self.get_day()\n self.data = None\n self.cotd = None\n self.output = None\n self.irc = irc\n\n def get_day(self):\n return datetime.datetime.now().strftime(\"%A\")\n\n def get_page_contents(self, page):\n try:\n response = urllib2.urlopen(page)\n self.data = response.read()\n except urllib2.HTTPError as err:\n self.output = \"Could not open \" + page\n\n def get_cotd(self):\n self.get_page_contents(\"http://www.cheese.com/\")\n try:\n soup = BeautifulSoup(self.data)\n cotd_div = soup.find('div', attrs={'class': 'top-offer'})\n self.cotd = cotd_div.a.get('href').replace('/', '')\n except AttributeError:\n self.output = \"Could not find the cheese of the day\"\n\n def set_output(self):\n if self.cotd != None:\n self.output = \"The cheese of the day is: \" + self.cotd\n elif self.cotd == None and self.output == None:\n self.output = \"Unable to retrieve the cheese of the day\"\n\n def run(self):\n if self.cotd == None or self.day != self.get_day():\n self.get_cotd()\n self.set_output()\n self.irc.message(self.output)\n\ndef main(irc, nick, data, handler):\n if len(data) == 2:\n if data[0] == \"Gouda:\" and data[1] == \"cotd\":\n cheese = Cheese(irc)\n cheese.run()\n","sub_path":"modules/cheese/cheese.py","file_name":"cheese.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
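calculate_dsi in stereo.py above builds the disparity-space image by broadcasting one scan line against the other, so dsi[i, j] = |right[i] - left[j]| is the matching cost of pairing right pixel i with left pixel j. The same construction on a toy pair (values chosen arbitrarily):

import numpy as np

left = np.array([10., 20., 30.])
right = np.array([30., 10., 20.])

# Column vector minus row vector broadcasts to a (w, w) cost matrix,
# exactly as in calculate_dsi.
dsi = np.abs(np.expand_dims(right, axis=1) - left)
assert dsi.shape == (3, 3)
print(dsi)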
+{"seq_id":"640533780","text":"'''\n\tComputes a mass balance for water for any two Flowmeters \n\tor for COD-CH4 in the reactor area for any range of dates\n\ttakes dates as inputs and outputs a summary file with mass balance info\n'''\n\nfrom __future__ import print_function\nimport matplotlib\nmatplotlib.use(\"TkAgg\",force=True) \nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as tkr\nimport matplotlib.dates as dates\nimport pylab as pl\nimport numpy as np\nimport scipy as sp\nfrom scipy import interpolate as ip\nimport pandas as pd\nimport datetime as datetime\nfrom datetime import datetime as dt\nfrom pandas import read_excel\nimport os\nimport sys\nimport functools\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter.filedialog import asksaveasfilename\nfrom tkinter.filedialog import askdirectory\nimport get_lab_data as gld\nimport hmi_data_agg as hmi\nimport est_biogas_prod as ebg\n\ndef get_cod_bal(\n\tstart_dt,\n\tend_dt,\n\thmi_path = None, \n\tgasprod_path = None, \n\tfeeding_path = None,\n\ttemp_path = None\n):\n\n\tstart_dt_str = start_dt\n\tend_dt_str = end_dt\n\tstart_dt = dt.strptime(start_dt,'%m-%d-%y')\n\tend_dt = dt.strptime(end_dt,'%m-%d-%y')\n\n\tgas_elids = ['FT700','FT704']\n\ttemp_elids = ['AT304','AT310']\n\tinf_elid = 'FT202'\n\teff_elid = 'FT305'\n\n\t# Read in gas production and feeding volumes from HMI (or preprocess them if no volumes file has been provided)\n\tif gasprod_path:\n\t\tgasprod_dly = pd.read_csv(gasprod_path)\n\telif hmi_path:\n\t\thmi_gas = hmi.hmi_data_agg('raw','gas')\n\t\tgasprod_dly = hmi_gas.run_report(\n\t\t\t24,\n\t\t\tgas_elids,\n\t\t\t['total','total'],\n\t\t\tstart_dt_str,\n\t\t\tend_dt_str,\n\t\t\thmi_path = hmi_path,\n\t\t\toutput_csv = 1\n\t\t)\n\telse:\n\t\tprint('You need to give either a path to the file with HMI data or a summary report of processed HMI data')\n\t\tsys.exit()\n\t\t\t\n\tif feeding_path:\n\t\tfeeding_dly = pd.read_csv(feeding_path)\n\telif hmi_path:\n\t\thmi_water = hmi.hmi_data_agg('raw','water')\n\t\tfeeding_dly = hmi_water.run_report(\n\t\t\t24,\n\t\t\t[inf_elid, eff_elid],\n\t\t\t['total','total'],\n\t\t\tstart_dt_str,\n\t\t\tend_dt_str,\n\t\t\thmi_path = hmi_path,\n\t\t\toutput_csv = 1\n\t\t)\t\n\n\tif temp_path:\n\t\ttemp_dly = pd.read_csv(temp_path)\n\telif hmi_path:\n\t\thmi_temp = hmi.hmi_data_agg('raw','temp')\n\t\ttemp_dly = hmi_temp.run_report(\n\t\t\t24,\n\t\t\ttemp_elids,\n\t\t\t['average','average'],\n\t\t\tstart_dt_str,\n\t\t\tend_dt_str,\n\t\t\thmi_path = hmi_path,\n\t\t\toutput_csv = 1\n\t\t)\n\n\t# Get lab data from file on box and filter to desired dates\n\tlabdat = gld.get_data(['COD','GasComp'])\n\t\n\t# COD data\n\tcod_dat = labdat['COD']\n\tcod_dat['Date_Time'] = pd.to_datetime(cod_dat['Date_Time'])\n\tcod_dat = cod_dat.loc[\n\t\t(cod_dat['Date_Time'] >= start_dt) &\n\t\t(cod_dat['Date_Time'] <= end_dt)\n\t]\n\t# Drop duplicates\n\tcod_dat.drop_duplicates(keep = 'first', inplace = True)\n\t# Get average of multiple values taken on same day\n\tcod_dly = cod_dat.groupby(['Date_Time','Stage','Type']).mean()\n\t# Convert to wide to get COD in and out of the reactors\n\tcod_dly_wide = cod_dly.unstack(['Stage','Type'])\n\tcod_dly_wide['COD In'] = cod_dly_wide['Value']['Microscreen']['Total']\n\tcod_dly_wide['COD Out'] = cod_dly_wide['Value']['Duty AFMBR Effluent']['Soluble']\n\tcod_dly_wide.reset_index(inplace = True)\n\tcod_dly_wide['Date'] = cod_dly_wide['Date_Time'].dt.date\n\tcod_dly_clean = pd.DataFrame(\n\t\tcod_dly_wide[['Date','COD In','COD Out']].values,\n\t\tcolumns = ['Date','COD In','COD Out']\n\t)\n\n\t# Gas Composition Data\n\tgc_dat = labdat['GasComp']\n\tgc_dat['Date_Time'] = pd.to_datetime(gc_dat['Date_Time'])\n\tgc_dly = gc_dat.loc[\n\t\t(gc_dat['Date_Time'] >= start_dt) &\n\t\t(gc_dat['Date_Time'] <= end_dt) &\n\t\t(gc_dat['Type'].isin(['Methane (%)','Carbon Dioxide (%)']))\n\t]\n\tgc_dly = gc_dly.groupby(['Date_Time','Type']).mean()\n\tgc_dly_wide = gc_dly.unstack('Type')\n\tgc_dly_wide['CH4%'] = gc_dly_wide['Value']['Methane (%)']\n\tgc_dly_wide['CO2%'] = gc_dly_wide['Value']['Carbon Dioxide (%)']\n\tgc_dly_wide.reset_index(inplace = True)\n\tgc_dly_wide['Date'] = gc_dly_wide['Date_Time'].dt.date\n\tgc_dly_clean = pd.DataFrame(\n\t\tgc_dly_wide[['Date','CH4%','CO2%']].values,\n\t\tcolumns = ['Date','CH4%','CO2%']\n\t)\n\n\t# Gas Production HMI Data\n\tgasprod_dly['Time'] = pd.to_datetime(gasprod_dly['Time'])\n\tgasprod_dly['Date'] = gasprod_dly['Time'].dt.date\n\tgasprod_dly['Biogas Out'] = gasprod_dly['FT700_TOTAL'] + gasprod_dly['FT704_TOTAL']\n\tgasprod_dly_clean = gasprod_dly[['Date','Biogas Out']]\n\n\t# Feeding HMI Data\n\tfeeding_dly['Time'] = pd.to_datetime(feeding_dly['Time'])\n\tfeeding_dly['Date'] = feeding_dly['Time'].dt.date\n\tfeeding_dly['Flow In'] = feeding_dly[inf_elid + '_TOTAL']\n\tfeeding_dly['Flow Out'] = feeding_dly[eff_elid + '_TOTAL']\n\tfeeding_dly_clean = feeding_dly[['Date','Flow In','Flow Out']]\n\n\t# Reactor Temperature HMI data\n\ttemp_dly['Time'] = pd.to_datetime(temp_dly['Time'])\n\ttemp_dly['Date'] = temp_dly['Time'].dt.date\n\ttemp_dly['Reactor Temp'] = temp_dly[[elid + '_AVERAGE' for elid in temp_elids]].mean(axis = 1)\n\ttemp_dly_clean = temp_dly[['Date','Reactor Temp']]\n\n\t# List of all dataframes\n\tdfs_dly = [temp_dly_clean, feeding_dly_clean, gasprod_dly_clean, cod_dly_clean, gc_dly_clean]\n\n\t# Merge all datasets\n\tcod_bal_dly = functools.reduce(lambda left,right: pd.merge(left,right,on='Date'), dfs_dly)\n\tcod_bal_dly.dropna(axis = 0, how = 'any', inplace = True)\n\t\n\t# Get actual estimated methane output (L)\n\tcod_bal_dly['Est CH4 Prod'] = cod_bal_dly['Biogas Out']*cod_bal_dly['CH4%']/100\n\tcod_bal_dly['COD Consumed'] = cod_bal_dly['COD In'] - cod_bal_dly['COD Out']\n\n\t# Get theoretical estimated methane output\n\tgasprod_thry = []\n\tfor index,row in cod_bal_dly.iterrows():\n\t\tgasprod_thry.append(\n\t\t\tebg.get_biogas_prod(\n\t\t\t\tBODrem = row['COD Consumed'], \n\t\t\t\tinfSO4 = 4, \n\t\t\t\ttemp = row['Reactor Temp'], \n\t\t\t\tpercCH4 = row['CH4%']/100, \n\t\t\t\tpercCO2 = row['CO2%']/100, \n\t\t\t\t# This script takes units of m3/day\n\t\t\t\tflowrate = row['Flow Out']*0.00378541, \n\t\t\t\tprecision = 1E-6\n\t\t\t)\n\t\t)\n\n\tcod_bal_dly['Thr CH4 Prod'] = [row[0] for row in gasprod_thry]\n\tcod_bal_dly['Thr Biogas Prod'] = [row[1] for row in gasprod_thry]\n\tcod_bal_dly['CH4 Prod Ratio'] = \\\n\t\tcod_bal_dly['Est CH4 Prod']/cod_bal_dly['Thr CH4 Prod']\t\n\n\tdays_el = (cod_bal_dly['Date'] - cod_bal_dly['Date'][0])/np.timedelta64(24,'h')\n\tz = np.polyfit(\n\t\tdays_el, \n\t\tcod_bal_dly['CH4 Prod Ratio'], \n\t\t1\n\t)\n\tp = np.poly1d(z)\n\n\tcod_bal_dly.to_csv('C:/Users/jbolorinos/Google Drive/Codiga Center/Miscellany/balance.csv')\n\tfig, ax = plt.subplots()\n\tplt.plot(cod_bal_dly['Date'],cod_bal_dly['CH4 Prod Ratio'])\n\tplt.plot(cod_bal_dly['Date'],p(days_el),\"r--\")\n\tplt.ylabel('Ratio of Actual to Theoretical ' + r'$CH_4$' + ' production')\n\tlabels = ax.get_xticklabels()\n\tplt.setp(labels, rotation=30)\n\tplt.tight_layout()\n\tplt.show()\n\n\nget_cod_bal(\n\t'7-26-17',\n\t'9-26-17',\n\tgasprod_path = 'C:/Users/jbolorinos/Google Drive/Codiga Center/Miscellany/HMIGAS_FT700_FT704_07-26-17_09-26-17.csv',\n\tfeeding_path = 'C:/Users/jbolorinos/Google Drive/Codiga Center/Miscellany/HMIWATER_FT202_FT305_07-26-17_09-26-17.csv',\n\ttemp_path = 'C:/Users/jbolorinos/Google Drive/Codiga Center/Miscellany/HMITEMP_AT304_AT310_07-26-17_09-26-17.csv'\n\t# hmi_path = 'C:/Users/jbolorinos/Google Drive/Codiga Center/HMI Data/Reactor Feeding - Raw_20170927064036.csv'\n)\n\n\n","sub_path":"get_mass_balance.py","file_name":"get_mass_balance.py","file_ext":"py","file_size_in_byte":7153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
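get_mass_balance.py above repeatedly uses the groupby-then-unstack pattern to pivot long-format lab results into one column per measurement type. A toy illustration of that reshape (fabricated numbers, for shape only):

import pandas as pd

long_fmt = pd.DataFrame({
    'Date': ['2017-07-26', '2017-07-26', '2017-07-27', '2017-07-27'],
    'Type': ['Methane (%)', 'Carbon Dioxide (%)'] * 2,
    'Value': [62.0, 35.0, 61.0, 36.0],
})

# Average duplicates per (Date, Type), then pivot Type out into columns.
wide = long_fmt.groupby(['Date', 'Type']).mean().unstack('Type')
print(wide['Value']['Methane (%)'])  # one CH4 value per date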
+{"seq_id":"604535714","text":"import datetime\r\n\r\nfrom bars.base import BarsBase\r\n\r\n\r\nclass BarsMobile(BarsBase):\r\n def __init__(self, login: str, password: str, host: str):\r\n super().__init__(login, password, host)\r\n\r\n self.child = 0\r\n\r\n async def authorize(self, child_id: int = None) -> dict:\r\n response = await self.post(\r\n \"rest/login\", params={\"login\": self.login, \"password\": self.password}\r\n )\r\n json_response = await response.json()\r\n\r\n if child_id is not None:\r\n self.child = child_id\r\n else:\r\n self.child = json_response[\"childs\"][0][0]\r\n return json_response\r\n\r\n @staticmethod\r\n def format_date(datetime_object: datetime.datetime) -> str:\r\n return datetime_object.strftime(\"%d.%m.%Y\")\r\n\r\n async def logout(self):\r\n await self.get(\"rest/logout\")\r\n\r\n async def esia(self) -> dict:\r\n response = await self.get(\"rest/esia/mobile\")\r\n return await response.json()\r\n\r\n async def get_diary(\r\n self, from_date=datetime.datetime.today(), to_date=datetime.datetime.today(),\r\n ) -> dict:\r\n from_date = self.format_date(from_date)\r\n to_date = self.format_date(to_date)\r\n response = await self.post(\r\n \"rest/diary\",\r\n {\"pupil_id\": self.child, \"from_date\": from_date, \"to_date\": to_date},\r\n )\r\n return await response.json()\r\n\r\n async def get_summary_marks(self, date=datetime.datetime.today()) -> dict:\r\n date = self.format_date(date)\r\n response = await self.get(\r\n \"rest/progress_average\", {\"pupil_id\": self.child, \"date\": date}\r\n )\r\n return await response.json()\r\n\r\n async def get_total_marks(self, date=datetime.datetime.today()) -> dict:\r\n date = self.format_date(date)\r\n response = await self.get(\"rest/totals\", {\"pupil_id\": self.child, \"date\": date})\r\n return await response.json()\r\n\r\n async def get_meeting(self) -> dict:\r\n response = await self.get(\"rest/school_meetings\", {\"pupil_id\": self.child})\r\n return await response.json()\r\n\r\n async def get_additional_materials(self, lesson_id: int) -> dict:\r\n response = await self.get(\r\n \"rest/additional_materials\",\r\n {\"pupil_id\": self.child, \"lesson_id\": lesson_id},\r\n )\r\n return await response.json()\r\n\r\n async def get_lessons_scores(\r\n self, subject: str, date=datetime.datetime.today()\r\n ) -> dict:\r\n date = self.format_date(date)\r\n response = await self.get(\r\n \"rest/lessons_scores\",\r\n {\"pupil_id\": self.child, \"subject\": subject, \"date\": date},\r\n )\r\n return await response.json()\r\n\r\n async def get_progress_average(self, date=datetime.datetime.today()) -> dict:\r\n date = self.format_date(date)\r\n response = await self.get(\r\n \"rest/progress_average\", {\"pupil_id\": self.child, \"date\": date},\r\n )\r\n return await response.json()\r\n\r\n async def check_food(self) -> dict:\r\n response = await self.get(\"rest/check_food\", {\"pupil_id\": self.child},)\r\n return await response.json()\r\n\r\n async def authorization_food(self) -> dict:\r\n response = await self.get(\"rest/authorization_food\", {\"pupil_id\": self.child},)\r\n return await response.json()\r\n","sub_path":"bars/mobile.py","file_name":"mobile.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"207996871","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision import transforms\n\nimport numpy as np\n\nimport os\nimport time\nfrom datetime import datetime\n\nfrom tqdm import tqdm\n\nimport sys\nfrom loguru import logger\n\n# Function used to choose computation device.\ndef set_device(\n var, \n use_cuda: bool\n):\n \"\"\"\n Gets tensor or nn variable ready for computation on a device.\n\n params:\n var: Tensor or model\n use_cuda: Whether to use GPU for computation or not\n\n returns:\n var: The same tensor or model, prepared for computation on the selected device\n \"\"\"\n if use_cuda == True:\n if torch.cuda.is_available():\n var = var.cuda()\n else:\n var = var.cpu()\n else:\n var = var.cpu()\n\n return var\n\n# Custom dataset used for augmentation\nclass augmented_data(Dataset):\n \"\"\"\n Augmented dataset\n\n This class inherits from PyTorch's Dataset class, which allows for overloading the initializer and getter to contain a transform operation.\n \"\"\"\n def __init__(self, data, transform):\n self.data = data\n self.transform = transform\n\n def __len__(self):\n return self.data.shape[0]\n\n def __getitem__(self, idx):\n item = self.data[idx]\n item = self.transform(item)\n return item\n\n# The network\nclass Net(object):\n \"\"\"\n The DeTraC model.\n\n params:\n pretrained_model: VGG, AlexNet or whatever other ImageNet pretrained model is chosen\n num_classes\n lr: Learning rate\n cuda: Choice of computation device (Whether to use GPU or not)\n mode: The DeTraC model contains 2 modes which are used depending on the case:\n - feature_extractor: used in the first phase of computation, where the pretrained model is used to extract the main features from the dataset\n - feature_composer: used in the last phase of computation, where the model is now training on the composed images, using the extracted features and clustering them.\n ckpt_dir\n labels: The text labels to be saved inside the model.\n \"\"\"\n def __init__(\n self,\n pretrained_model,\n num_classes: int,\n mode: str,\n ckpt_dir: str,\n cuda: bool = False,\n labels: list = [],\n lr: float = 0.0\n ):\n self.mode = mode\n self.model = pretrained_model\n self.num_classes = num_classes\n self.lr = lr\n self.cuda = cuda\n self.ckpt_dir = ckpt_dir\n self.labels = labels\n \n # Check whether the mode is correct\n assert self.mode == \"feature_extractor\" or self.mode == \"feature_composer\"\n\n # Check if checkpoint directory exists\n assert os.path.exists(ckpt_dir)\n\n # Prepare model for computation on the selected device\n self.model = set_device(self.model, self.cuda)\n\n # Extract the input size based on the second to last layer\n try:\n self.input_size = self.model.classifier[-1].in_features\n except:\n self.input_size = self.model.fc.in_features\n\n # Introduce a new layer of computation\n self.classification_layer = nn.Linear(\n in_features=self.input_size, \n out_features=self.num_classes\n )\n self.softmax_activation = nn.Softmax(dim=1)\n\n # Set the weights and biases accordingly\n with torch.no_grad():\n self.classification_layer.weight = torch.nn.Parameter(set_device(torch.randn((self.num_classes, self.input_size)) * 1e-5, self.cuda))\n self.classification_layer.bias = torch.nn.Parameter(set_device(torch.randn(self.num_classes) * 1e-5 + 1, self.cuda))\n\n # Prepare the layer for computation on the selected device\n self.classification_layer = set_device(nn.Sequential(\n self.classification_layer, \n self.softmax_activation\n ), self.cuda)\n\n # Replace the pretrained classification layer with the custom classification layer\n try:\n self.model.classifier[-1] = self.classification_layer\n \n # Total number of pretrained layers \n # (except last classification layer)\n self.num_pretrained_layers = len(list(self.model.modules())) - len(list(self.model.classifier))\n except:\n self.model.fc = self.classification_layer\n \n # Total number of pretrained layers \n # (except last fully connected layer)\n self.num_pretrained_layers = len(list(self.model.modules())) - len(list(self.model.fc))\n\n # Training mode\n # Number of layers to activate and freeze\n self.hm_layers_to_activate, self.hm_layers_to_freeze = 0, 0\n\n # The choice will only be given if it is \n # the feature extractor that it is training.\n if self.mode == \"feature_extractor\":\n # User choice\n while(True): #while loop to ask user for training mode\n print(\"\"\"\n Choose a mode in which you wish to train:\\n\n 1) Shallow-tuning (Fast, but inaccurate)\\n\n 2) Deep-tuning (Slow and requires a lot of data, but accurate)\\n\n 3) Fine-tuning\n \"\"\")\n\n while(True): #prompt user to enter a valid number\n try: #if valid number is selected, it exits otherwise repeats\n self.training_mode = int(input(\"> \"))\n break\n except ValueError: #handle the exception if it is anything else than int\n logger.warning(\"Please enter an integer\")\n\n if(1 <= self.training_mode <= 3): #check if number is between 1 and 3, if so break \n break\n \n logger.info(f\"training_mode {self.training_mode} selected\") #add number to logging\n\n # If the user chose the fine-tuning method, \n # prepare the layers for freezing and training respectively\n if self.training_mode == 3:\n print(f\"Pretrained model has {self.num_pretrained_layers} layers.\")\n \n # How many layers to activate \n # (prepare their weights for gradient descent)\n self.hm_layers_to_activate = int(input(\"> How many layers to train?: \"))\n while self.hm_layers_to_activate < 0 or self.hm_layers_to_activate > self.num_pretrained_layers:\n self.hm_layers_to_activate = int(input(\"> How many layers to train?: \"))\n # How many layers to freeze\n # (how many to omit when executing gradient descent)\n self.hm_layers_to_freeze = self.num_pretrained_layers - self.hm_layers_to_activate\n else:\n self.hm_layers_to_freeze = self.num_pretrained_layers\n\n # Set the save path, freeze or unfreeze the gradients based on the mode and define appropriate optimizers and schedulers.\n # Feature extractor => Freeze all gradients except the custom classification layer\n # Feature composer => Unfreeze / Activate all gradients\n now = datetime.now()\n now = f'{str(now).split(\" \")[0]}_{str(now).split(\" \")[1]}'.split(\".\")[0].replace(':', \"-\")\n if self.mode == \"feature_extractor\":\n self.save_name = f\"DeTraC_feature_extractor_{now}.pth\"\n\n if self.training_mode == 1:\n print(\"Freezing all pretrained layers. Activating only classification layer\")\n for param in self.model.parameters():\n param.requires_grad = False\n try:\n for param in self.model.classifier[-1].parameters():\n param.requires_grad = True\n except:\n for param in self.model.fc.parameters():\n param.requires_grad = True\n\n elif self.training_mode == 2:\n print(\"Activating all layers\")\n for param in self.model.parameters():\n param.requires_grad = True\n\n else:\n print(f\"Freezing {self.hm_layers_to_freeze} layers and activating {self.hm_layers_to_activate}.\")\n for i, param in enumerate(self.model.parameters()):\n if i <= self.hm_layers_to_freeze:\n param.requires_grad = False\n else:\n param.requires_grad = True\n try:\n for param in self.model.classifier[-1].parameters():\n param.requires_grad = True\n except:\n for param in self.model.fc.parameters():\n param.requires_grad = True \n\n self.optimizer = optim.SGD(\n params=self.model.parameters(),\n lr=self.lr,\n momentum=0.9,\n nesterov=False,\n weight_decay=1e-3\n )\n\n self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(\n optimizer=self.optimizer,\n factor=0.9,\n patience=3\n )\n\n else:\n self.save_name = f\"DeTraC_feature_composer_{now}.pth\"\n\n for param in self.model.parameters():\n param.requires_grad = True\n\n self.optimizer = optim.SGD(\n params=self.model.parameters(),\n lr=self.lr,\n momentum=0.95,\n nesterov=False,\n weight_decay=1e-4\n )\n\n self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(\n optimizer=self.optimizer,\n factor=0.95,\n patience=5\n )\n\n self.ckpt_path = os.path.join(self.ckpt_dir, self.save_name)\n\n # Define the loss.\n # Categorical crossentropy is a negative log likelihood loss, where the logit is the log of the model's output, and the label is the argmax from the list of labels\n self.criterion = nn.NLLLoss()\n\n def save(\n self, \n epoch: int, \n train_loss: float, \n train_acc: float, \n val_loss: float, \n val_acc: float\n ):\n \"\"\"\n Save the model's gradients, as well as the optimizer's latent gradients.\n Also save some additional data, such as epoch, loss and accuracy.\n\n params:\n epoch\n train_loss\n train_acc\n val_loss\n val_acc\n \"\"\"\n torch.save({\n \"model_state_dict\": self.model.state_dict(),\n \"optimizer_state_dict\": self.optimizer.state_dict(),\n \"epoch\": epoch,\n \"train_loss\": train_loss,\n \"train_acc\": train_acc,\n \"val_loss\": val_loss,\n \"val_acc\": val_acc,\n \"labels\": self.labels,\n \"num_classes\": self.num_classes\n }, self.ckpt_path)\n\n def load_model_for_inference(\n self, \n ckpt_path: str\n ):\n \"\"\"\n Loads the model's state for inference.\n\n params:\n ckpt_path: Model's path\n \"\"\"\n checkpoint = torch.load(ckpt_path, map_location=lambda storage, loc: storage)\n self.model.load_state_dict(checkpoint['model_state_dict'])\n \n def load(\n self, \n *args\n ) -> list:\n \"\"\"\n Load the model on GPU or CPU.\n\n params:\n args\n\n returns:\n loaded_args: The arguments used in the model's checkpoint, used to load the model. \n \"\"\"\n # Prompt the user\n prompt = input(\"Load on GPU or CPU [GPU / CPU]\\n\")\n while prompt != \"GPU\" and prompt != \"CPU\":\n prompt = input(\"Load on GPU or CPU? [GPU / CPU]\\n\")\n\n print(\"Loading checkpoint\")\n # Load model on selected computation device\n if prompt == \"CPU\":\n checkpoint = torch.load(\n self.ckpt_path, map_location=lambda storage, loc: storage)\n self.cuda = False\n else:\n checkpoint = torch.load(self.ckpt_path)\n self.cuda = True\n\n # List of arguments to load\n loaded_args = []\n loaded_model = False\n for arg in args:\n if arg == \"model_state_dict\":\n if loaded_model == False:\n print(\"Loading model state\")\n self.model.load_state_dict(checkpoint[arg])\n loaded_model = True\n else:\n try:\n print(f\"Loading {arg}\")\n loaded_args.append(checkpoint[arg])\n except:\n print(f\"{arg} does not exist in model checkpoint.\")\n\n if len(loaded_args) != 0:\n return loaded_args\n\n def load_labels_for_inference(\n self, \n ckpt_path: str\n ):\n \"\"\"\n Load labels from the model's checkpoint \n \"\"\"\n\n checkpoint = torch.load(\n ckpt_path, map_location=lambda storage, loc: storage)\n return checkpoint['labels']\n\n def train_step(\n self, \n train_loader: DataLoader\n ) -> [float, float]:\n \"\"\"\n Model's training step.\n\n params:\n train_loader: Training dataset containing the features and labels, shuffle and batched appropriately.\n\n returns:\n err: The training loss at that step\n acc: The training accuracy at that step\n \"\"\"\n # Set model in train mode\n self.model.train()\n\n # Initialize running error and running accuracy metrics\n running_error = 0.0\n running_correct = 0\n\n # Iterate through the data\n for features, labels in train_loader:\n if self.mode == \"feature_extractor\":\n # Here we permute because pretrained models in Pytorch require inputs of shape (batch_size, num_channels, width, height)\n features = features.permute(0, 3, 1, 2)\n\n # Load data to the desired computation device\n features = set_device(features, self.cuda)\n labels = set_device(labels, self.cuda)\n\n # Reset the optimizer's gradients\n self.optimizer.zero_grad()\n\n # Enable gradients for gradient descent\n with torch.set_grad_enabled(True):\n # Prediction\n pred = self.model(features)\n\n # Cross entropy loss\n loss = self.criterion(torch.log(pred), torch.max(labels, 1)[1])\n\n # Backprop\n loss.backward()\n\n # Optimizer step\n self.optimizer.step()\n\n # Labels for metrics\n _, pred_idx = torch.max(pred.data, 1)\n _, true_idx = torch.max(labels.data, 1)\n\n # Metrics calculations\n running_error += loss.item() * features.size(0)\n running_correct += (pred_idx == true_idx).float().sum()\n\n # Total error\n err = running_error / len(train_loader.dataset)\n \n # Total accuracy\n acc = 100 * running_correct / len(train_loader.dataset)\n\n return err, acc\n\n def validation_step(\n self, \n validation_loader: DataLoader\n ) -> [float, float]:\n \"\"\"\n Model's validation step.\n\n params:\n validation_loader: Validation dataset containing the features and labels, shuffle and batched appropriately.\n\n returns:\n err: The validation loss at that step\n acc: The validation accuracy at that step\n \"\"\"\n\n # Set model in evaluation mode\n self.model.eval()\n\n # Initialize running error and running accuracy metrics\n running_error = 0.0\n running_correct = 0\n\n # Iterate through the data\n for features, labels in validation_loader:\n if self.mode == \"feature_extractor\":\n features = features.permute(0, 3, 1, 2)\n\n # Load data to the desired computation device\n features = set_device(features, self.cuda)\n labels = set_device(labels, self.cuda)\n\n # Reset the optimizer's gradients\n self.optimizer.zero_grad()\n\n # Disable gradients as there is no training done in the validation step\n with torch.no_grad():\n # Prediction\n pred = self.model(features)\n\n # Cross-entropy loss\n loss = self.criterion(torch.log(pred), torch.max(labels, 1)[1])\n\n # Labels for metrics\n _, pred_idx = torch.max(pred.data, 1)\n _, true_idx = torch.max(labels.data, 1)\n\n # Metrics calculations\n running_error += loss.item() * features.size(0)\n running_correct += (pred_idx == true_idx).float().sum().item()\n\n # Total error\n err = running_error / len(validation_loader.dataset)\n \n # Total accuracy\n acc = 100 * running_correct / len(validation_loader.dataset)\n\n return err, acc\n\n def fit(\n self,\n x_train: np.ndarray,\n y_train: np.ndarray,\n x_test: np.ndarray,\n y_test: np.ndarray,\n epochs: int,\n batch_size: int,\n resume: bool\n ):\n \"\"\"\n The model's fit process.\n\n params:\n x_train\n y_train\n x_test\n y_test\n epochs\n batch_size\n resume: Whether to resume training or not\n \"\"\"\n if self.cuda == True:\n torch.backends.cudnn.benchmark = True\n\n # Mode choice\n if self.mode == \"feature_extractor\":\n train_loader = DataLoader(dataset=list(zip(\n x_train, y_train)), shuffle=True, batch_size=batch_size, num_workers=2, pin_memory=True)\n validation_loader = DataLoader(dataset=list(zip(\n x_test, y_test)), shuffle=True, batch_size=batch_size, num_workers=2, pin_memory=True)\n else:\n # We want to augment the data when in feature composition mode.\n train_transform = transforms.Compose([\n transforms.ToPILImage(\"RGB\"),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(\n [0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225]\n )\n ])\n\n val_transform = transforms.Compose([\n transforms.ToPILImage(\"RGB\"),\n transforms.ToTensor(),\n transforms.Normalize(\n [0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225]\n )\n ])\n\n x_train_ds = augmented_data(x_train, train_transform)\n x_test_ds = augmented_data(x_test, val_transform)\n\n train_loader = DataLoader(\n dataset=list(zip(x_train_ds, y_train)), \n shuffle=True, \n batch_size=batch_size\n )\n validation_loader = DataLoader(\n dataset=list(zip(x_test_ds, y_test)), \n shuffle=True, \n batch_size=batch_size\n )\n\n start_epoch = 0\n # If the user chooses to resume\n if resume == True:\n\n # List of checkpoints\n ckpt_paths_list = []\n for i, ckpt_path in enumerate(os.listdir(self.ckpt_dir)):\n if self.mode == \"feature_extractor\":\n if \"feature_extractor\" in ckpt_path:\n print(f\"{i + 1}) {ckpt_path}\")\n ckpt_paths_list.append(ckpt_path)\n else:\n if \"feature_composer\" in ckpt_path:\n print(f\"{i + 1}) {ckpt_path}\")\n ckpt_paths_list.append(ckpt_path)\n\n # Check if there are any available checkpoints\n assert len(ckpt_paths_list) > 0\n\n # Prompt user for a choice\n ckpt_path_choice = -1\n while ckpt_path_choice > len(ckpt_paths_list) or ckpt_path_choice < 1:\n ckpt_path_choice = int(input(f\"Which model would you like to load? [Number between 1 and {len(ckpt_paths_list)}]: \"))\n\n ckpt_path = os.path.join(\n self.ckpt_dir, ckpt_paths_list[ckpt_path_choice - 1])\n\n # Load said checkpoint and its elements.\n checkpoint = torch.load(ckpt_path)\n start_epoch = checkpoint['epoch']\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n self.model.load_state_dict(checkpoint['model_state_dict'])\n\n # Training loop\n progress_bar = tqdm(range(start_epoch, epochs))\n progress_bar.set_description(\"Resuming training...\")\n for epoch in progress_bar:\n train_loss, train_acc = self.train_step(train_loader)\n val_loss, val_acc = self.validation_step(validation_loader)\n\n # Regularization\n self.scheduler.step(val_loss)\n\n # Save condition\n if epochs >= 10:\n if (epoch + 1) % (epochs // 10) == 0:\n self.save(epoch, train_loss,\n train_acc, val_loss, val_acc)\n else:\n self.save(epoch, train_loss,\n train_acc, val_loss, val_acc)\n\n progress_bar.set_description(\n f\"[Epoch {epoch + 1} stats]: train_loss = {train_loss} | train_acc = {train_acc}% | val_loss = {val_loss} | val_acc = {val_acc}%\")\n\n else:\n progress_bar = tqdm(range(start_epoch, epochs))\n progress_bar.set_description(\"Starting training...\")\n\n for epoch in progress_bar:\n train_loss, train_acc = self.train_step(train_loader)\n val_loss, val_acc = self.validation_step(validation_loader)\n \n # Regularization\n self.scheduler.step(val_loss)\n\n # Save condition\n if epochs >= 10:\n if (epoch + 1) % (epochs // 10) == 0:\n self.save(epoch, train_loss,\n train_acc, val_loss, val_acc)\n else:\n self.save(epoch, train_loss,\n train_acc, val_loss, val_acc)\n\n progress_bar.set_description(\n f\"[Epoch {epoch + 1} stats]: train_loss = {train_loss:.2f} | train_acc = {train_acc:.2f}% | val_loss = {val_loss:.2f} | val_acc = {val_acc:.2f}%\")\n\n def infer(\n self, \n input_data: np.ndarray, \n ckpt_path: str = None, \n use_labels: bool = False\n ) -> dict or np.ndarray:\n \"\"\"\n The model's inference process.\n\n params:\n input_data\n ckpt_path: Model's path\n use_labels: Whether to output nicely, with a description of the labels, or not\n returns:\n prediction\n \"\"\"\n # Disable gradients. We're not training.\n with torch.no_grad():\n # Convert the input data into a tensor if needed\n if type(input_data) != torch.Tensor:\n input_data = torch.Tensor(input_data)\n\n # Reshape appropriately.\n input_data = input_data.reshape(-1, 3, 224, 224)\n\n # Prediction\n output = self.model.cpu()(input_data).numpy()\n\n if use_labels == True:\n\n # Check if a model path is given\n assert ckpt_path != None\n\n # Create a dictionary of the shape: